diff --git "a/6235.jsonl" "b/6235.jsonl" new file mode 100644--- /dev/null +++ "b/6235.jsonl" @@ -0,0 +1,2030 @@ +{"seq_id":"10670668172","text":"import pandas as pd\r\nfrom tabulate import tabulate\r\n\r\n\r\ndef _group_deferred_invoices(df: pd.DataFrame) ->pd.DataFrame:\r\n\r\n df = df.groupby([\r\n 'customer_number',\r\n 'customer_name',\r\n 'wbs_element',\r\n 'customer_po',\r\n 'sales_document',\r\n 'order_reason',\r\n 'invoice',\r\n ], as_index=False)[['ni', 'cogs']].sum()\r\n df = df.loc[(df['cogs'] > 0.001) | (df['ni'] > 0.001), :]\r\n df['action'] = ''\r\n df['period'] = 'pending_to_recognize'\r\n df['ni'] = df['ni'].map('{:,.2f}'.format)\r\n df['cogs'] = df['cogs'].map('{:,.2f}'.format)\r\n\r\n return df\r\n\r\n\r\ndef _group_actual_period_invoices(df: pd.DataFrame) -> pd.DataFrame:\r\n\r\n df = df.groupby([\r\n 'customer_number',\r\n 'customer_name',\r\n 'wbs_element',\r\n 'customer_po',\r\n 'sales_document',\r\n 'order_reason',\r\n 'invoice',\r\n ], as_index=False)[['ni', 'cogs']].sum()\r\n\r\n df['action'] = ''\r\n df['period'] = 'actual_period'\r\n df['ni'] = df['ni'].map('{:,.2f}'.format)\r\n df['cogs'] = df['cogs'].map('{:,.2f}'.format)\r\n\r\n return df\r\n\r\n\r\ndef make_report(actual_df: pd.DataFrame, deferred_df: pd.DataFrame) -> pd.DataFrame:\r\n deferred_df = _group_deferred_invoices(deferred_df)\r\n\r\n actual_df = _group_actual_period_invoices(actual_df)\r\n\r\n merged_df = actual_df.append(deferred_df)\r\n merged_df.reset_index(drop=True, inplace=True)\r\n return merged_df\r\n","repo_name":"SergeyTrue/revenue_recognition_step_1","sub_path":"processor/_prepare_report.py","file_name":"_prepare_report.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"19420116535","text":"\"\"\"User registration date\n\nRevision ID: 79bb7e453494\nRevises: 2e1f5d30859d\nCreate Date: 2022-08-19 22:23:35.379997\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '79bb7e453494'\ndown_revision = '2e1f5d30859d'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n op.add_column('users', sa.Column('registration_date', sa.DateTime, nullable=True, default=None))\n\n\ndef downgrade() -> None:\n op.drop_column('users', 'registration_date')\n","repo_name":"dmitriyl99/signal-traider-bot","sub_path":"api/app/migrations/versions/79bb7e453494_user_registration_date.py","file_name":"79bb7e453494_user_registration_date.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"3473586539","text":"try:\n\tfoobar\nexcept NameError:\n\tprint(\"Problem: \")\n\nd = {\"name\" : \"Prashish\", \"age\" : \"24\", \"gender\" : \"male\"}\n\ndef get(d, key):\n\ttry:\n\t\treturn d[key]\n\texcept KeyError:\n\t\tprint(\"key not available\")\n\t\treturn None\n\nprint(get(d, \"ap\"))\n\n\nwhile True: \n\ttry:\n\t\tnum = int(input(\"Please enter a number: \"))\n\texcept ValueError:\n\t\tprint(\"This is not a number\")\n\telse:\n\t\tprint(\"Nice, it is a number\")\n\t\tbreak\n\tfinally: \n\t\tprint(\"runs no matter what\")\n\nprint(\"now you can watch\")","repo_name":"PrashishMan/python_init","sub_path":"try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"43943014648","text":"# -*- coding: utf-8 -*-\nimport numpy as 
np\nfrom pyGRBz.utils import mag2Jy, convAB\nfrom astropy.table import Table, Column\nfrom pyGRBz.extinction_correction import correct_MW_ext\nfrom pyGRBz.io_grb import load_telescope_transmissions\nimport imp\n\n\ndef load_sys_response(data, wavelength, path):\n \"\"\" Load the system throuput curves for each filter in the data\n\n Returns\n -------\n sys_rep: astropy Table\n \"\"\"\n # works for constant wvl_step\n dwvl = np.gradient(wavelength)\n\n sys_res = []\n tel_name = []\n tel_band = []\n wvl_eff = []\n width = []\n zp = []\n\n # Sort the telescope used\n for tel in data.group_by([\"telescope\", \"band\"]).groups.keys:\n # Import the filter throughput curve only once if filter used several\n # times (for a light curve for instance)\n tel_name.append(tel[\"telescope\"])\n tel_band.append(tel[\"band\"])\n\n # Import the throughput curve\n filter_trans = load_telescope_transmissions(\n {\"telescope\": tel[\"telescope\"], \"band\": tel[\"band\"], \"path\": path},\n wavelength,\n )\n sys_res.append(filter_trans)\n\n # calculate the effective wavelength\n a = np.trapz(wavelength * filter_trans, wavelength)\n b = np.trapz(filter_trans, wavelength)\n wvl_eff.append(a / b)\n\n # Calculate the width of the band\n mask = filter_trans > 0.05 * max(filter_trans)\n\n width.append(wavelength[mask][-1] - wavelength[mask][0])\n\n # print (tel['telescope'], tel['band'], a/b, width[-1])\n # Not sure it is used anymore ... to be checked. Formula also to check\n zp.append(2.5 * np.log10(np.sum(filter_trans * dwvl, axis=0)) + 23.9)\n\n sys_res_table = Table(\n [tel_name, tel_band, wvl_eff, width, sys_res, zp],\n names=[\"telescope\", \"band\", \"wvl_eff\", \"band_width\",\n \"sys_response\", \"zeropoint\"])\n\n # Sort the table by telescope names and ascending eff. wavelength\n sys_res_table.sort([\"telescope\", \"wvl_eff\"])\n return sys_res_table\n\n\ndef formatting_data(data, system_response, grb_info, wavelength,\n dustrecalib=\"yes\"):\n \"\"\" \"\"\"\n try:\n _, path_dust_map, _ = imp.find_module(\"pyGRBaglow\")\n except:\n print(\"path to pyGRBaglow can not be found.\")\n\n dustmapdir = path_dust_map + \"/galactic_dust_maps\"\n\n #  Add filter info to data (throughut curve,eff. 
wvl and width)\n col_band_width = Column(name=\"band_width\", data=np.zeros(len(data)))\n col_band_effwvl = Column(name=\"eff_wvl\", data=np.zeros(len(data)))\n col_band_zp = Column(name=\"zeropoint\", data=np.zeros(len(data)))\n col_band_sysres = Column(\n name=\"sys_response\", data=np.zeros((len(data), len(wavelength)))\n )\n data.add_columns([col_band_effwvl, col_band_width, col_band_zp,\n col_band_sysres])\n\n for table in data.group_by([\"telescope\", \"band\"]).groups.keys:\n # print (table)\n mask1 = data[\"telescope\"] == table[\"telescope\"]\n mask1[mask1 == True] = data[mask1][\"band\"] == table[\"band\"]\n mask2 = system_response[\"telescope\"] == table[\"telescope\"]\n mask2[mask2 == True] = system_response[mask2][\"band\"] == table[\"band\"]\n # print (system_response[mask3][mask4]['sys_response'])\n # print (system_response[mask3][mask4]['sys_response'][0])\n\n width = []\n effwvl = []\n zp = []\n sys_res = []\n for i in range(np.sum(mask2)):\n width.append(system_response[mask2][\"band_width\"][0])\n effwvl.append(system_response[mask2][\"wvl_eff\"][0])\n zp.append(system_response[mask2][\"zeropoint\"][0])\n sys_res.append(system_response[mask2][\"sys_response\"][0])\n data[\"band_width\"][mask1] = width\n data[\"eff_wvl\"][mask1] = effwvl\n data[\"zeropoint\"][mask1] = zp\n data[\"sys_response\"][mask1] = sys_res\n\n # Convert vega magnitudes in AB if needed\n mask1 = data[\"phot_sys\"] == \"vega\"\n if mask1.any():\n # print ('some vega')\n\n #  If a Vega-AB correction is present in the file use this value\n\n if \"ABcorr\" in data.colnames:\n mask2 = (data[\"phot_sys\"] == \"vega\") & (~data[\"ABcorr\"].mask)\n if mask2.any():\n # print ('AB corr')\n for table in data[mask2]:\n mask3 = mask2.copy()\n\n mask3[mask3 == True] = data[mask3][\"Name\"] == table[\"Name\"]\n mask3[mask3 == True] = (\n data[mask3][\"telescope\"] == table[\"telescope\"]\n )\n mask3[mask3 == True] = data[mask3][\"band\"] == table[\"band\"]\n mask3[mask3 == True] = (\n data[mask3][\"time_since_burst\"] == table[\"time_since_burst\"]\n )\n\n newABmag = table[\"mag\"] + table[\"ABcorr\"]\n photsys = \"AB\"\n # substitute the vega magnitudes by AB ones\n data[\"mag\"][mask3] = newABmag\n data[\"phot_sys\"][mask3] = photsys\n\n #  When no AB correction is given in input file, compute it\n mask2 = (data[\"phot_sys\"] == \"vega\") & (data[\"ABcorr\"].mask)\n if mask2.any():\n\n # print ('convAB')\n for table in data[mask2]:\n mask3 = mask2.copy()\n\n mask3[mask3 == True] = data[mask3][\"Name\"] == table[\"Name\"]\n mask3[mask3 == True] = (\n data[mask3][\"telescope\"] == table[\"telescope\"]\n )\n mask3[mask3 == True] = data[mask3][\"band\"] == table[\"band\"]\n mask3[mask3 == True] = (\n data[mask3][\"time_since_burst\"] == table[\"time_since_burst\"]\n )\n\n newABmag = table[\"mag\"] + convAB(wavelength, table[\"sys_response\"])\n photsys = \"AB\"\n # substitute the vega magnitudes by AB ones\n data[\"mag\"][mask3] = newABmag\n data[\"phot_sys\"][mask3] = photsys\n\n else:\n # print ('convAB')\n for table in data[mask1]:\n mask3 = mask1.copy()\n\n mask3[mask3 == True] = data[mask3][\"Name\"] == table[\"Name\"]\n mask3[mask3 == True] = data[mask3][\"telescope\"] == table[\"telescope\"]\n mask3[mask3 == True] = data[mask3][\"band\"] == table[\"band\"]\n mask3[mask3 == True] = (\n data[mask3][\"time_since_burst\"] == table[\"time_since_burst\"]\n )\n\n newABmag = table[\"mag\"] + convAB(wavelength,\n table[\"sys_response\"])\n photsys = \"AB\"\n # substitute the vega magnitudes by AB ones\n 
data[\"mag\"][mask3] = newABmag\n data[\"phot_sys\"][mask3] = photsys\n\n # Correct for galactic extinction\n data = correct_MW_ext(data, grb_info, wavelength,\n dustmapdir=dustmapdir,\n recalibration=dustrecalib)\n\n # Add Flux to the seds\n convert_dict = {\"photometry_system\": \"AB\"}\n\n flux = mag2Jy(convert_dict, data[\"mag\"] - data[\"ext_mag\"]) * 1e6\n flux_err = np.array(abs(flux * -0.4 * np.log(10) * data[\"mag_err\"]))\n mask = data[\"detection\"] == -1\n if mask.any():\n flux_err[mask] = flux[mask] / 2.0\n\n col_flux = Column(name=\"flux\", data=flux, unit=\"microJy\")\n col_flux_err = Column(name=\"flux_err\", data=flux_err, unit=\"microJy\")\n\n data.add_columns([col_flux, col_flux_err])\n\n data.sort([\"Name\", \"eff_wvl\"])\n return data\n","repo_name":"dcorre/pyGRBz","sub_path":"pyGRBz/formatting.py","file_name":"formatting.py","file_ext":"py","file_size_in_byte":7680,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"20"} +{"seq_id":"16852021954","text":"from jsonpickle import decode\r\nfrom flask import json\r\n\r\nclass Entity:\r\n @classmethod\r\n def load(cls, root):\r\n fileName = f\"{cls.__name__}/{cls.__name__}.json\".lower()\r\n handle = open(fileName)\r\n json_data = json.load(handle)\r\n\r\n list_data = []\r\n clsName = f\"{cls.__module__}.{cls.__name__}\"\r\n for item in json_data[root]:\r\n item.update({\"py/object\": clsName})\r\n list_data.append(decode(json.dumps(item)))\r\n \r\n return list_data\r\n \r\n def find(items, id):\r\n items_filtered = [item for item in items if item.id == id]\r\n if(len(items_filtered) == 0):\r\n return None\r\n return items_filtered[0]\r\n \r\n def validate_add(items, id):\r\n items_filtered = [item for item in items if item.id == id]\r\n if(len(items_filtered) > 0):\r\n raise f\"Apparel with id={id} exists!\"\r\n\r\n def validate_update(items, id):\r\n items_filtered = [item for item in items if item.id == id]\r\n if(len(items_filtered) == 0):\r\n raise f\"Apparel with id={id} not found!\"\r\n\r\n return items_filtered[0]\r\n\r\n def validate_delete(items, id):\r\n items_filtered = [item for item in items if item.id == id]\r\n if(len(items_filtered) == 0):\r\n raise f\"Apparel with id={id} not found!\"\r\n\r\n return items_filtered[0]\r\n\r\n","repo_name":"mthle17/919eaa","sub_path":"utils/entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"31630887333","text":"x=\"\"\npalabras=[]\nwhile x!=\"salir\":\n x=input(\"Ingrese una palabra (cuando Termine escriba salir): \").lower()\n palabras.append(x)\n\npalabras.remove(\"salir\")\ncontador=0\npalabra=input(\"Ingrese la palabra para buscar: \").lower()\nfor i in palabras:\n if i==palabra:\n contador=contador+1\nprint(\"la palabra:\",palabra,\"se repite \",contador,\" veces\") ","repo_name":"dofirtwo/EjeciciosInicialesPython","sub_path":"14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"72077750450","text":"from collections import deque\nimport copy \n\nn = int(input())\n\ngraph = [[] for _ in range(n+1)]\nin_degree = [0 for _ in range(n+1)]\ntime = [0 for _ in range(n+1)]\n\nfor i in range(1,n+1):\n data = list(map(int, input().split()))\n time[i] = data[0]\n\n for j in range(1, len(data)-1):\n graph[data[j]].append(i)\n in_degree[i] += 1\n\n\nresult = copy.deepcopy(time)\nq = 
deque()\n\nfor i in range(1, n+1):\n if in_degree[i] == 0:\n q.append(i)\n\nwhile q:\n now = q.popleft()\n for neighbor in graph[now]:\n in_degree[neighbor] -=1\n result[neighbor] = max(result[neighbor], result[now]+time[neighbor])\n if in_degree[neighbor] == 0:\n q.append(neighbor) \n\nfor i in range(1, n+1):\n print(result[i])\n\n\n","repo_name":"yangju1999/Coding_Test","sub_path":"algorithms/graph3.py","file_name":"graph3.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"69872196851","text":"\"\"\"\nspeed in free fall\n\"\"\"\n\nfrom math import sqrt\n\nACC_G = 9.8\nINITIAL_VELOCITY = 0\n\n\n# Get Input\nheight = float(input(\"Enter drop height: \"))\n\n# Compute\nvelocity = sqrt(INITIAL_VELOCITY**2 + 2 * ACC_G* height)\n\n# Display\nprint(f\"Velocity when the object hits the ground is {velocity:.02f} \\\nmeters per second\")\n","repo_name":"a5eeM/PWB","sub_path":"Ch 01/ex_19_free_fall.py","file_name":"ex_19_free_fall.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"74410627890","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\n\n# IT is bad practice to have views directly talk to models.\n# Instead: define a service class for this (Which just provides the methods of DB model access you want for the view(s))\nfrom graphs.models import *\n\ndef index(request):\n graphs = Graph.objects.all()\n # Also, use a rendered django template and not this crappy string HTML stuff.\n return HttpResponse(\n \"Graphs! Graphs! Graphs!

Graph Names:
{}\".format(\n \"
\".join(\n [\n \"Name: {} Created: {}\".format(g.name, g.created_at)\n for g in graphs\n ]\n )\n )\n )\n","repo_name":"jmeichle/django_graphs","sub_path":"graphs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"38510650927","text":"from typing import Any, Union, List, Optional\nfrom requests import request\nfrom urllib.parse import urlencode\n\nfrom pydantic import BaseModel, BaseSettings, HttpUrl, validator\n\n\nclass GithubToken(BaseModel):\n access_token: str\n\n @validator(\"access_token\")\n def validate_access_token(cls, v):\n if not v.startswith(\"gho_\"):\n return ValueError(v)\n return v\n\n\nclass GithubTokenRespone(GithubToken):\n token_type: str\n scope: str\n\n @validator(\"token_type\")\n def validate_token_type(cls, v):\n if v != \"bearer\":\n return ValueError(v)\n return v\n\n\nclass AuthorizedResponse(GithubToken):\n id: int\n login: str\n\n @validator(\"login\")\n def validate_login(cls, v):\n if not len(v) > 0:\n return ValueError(v)\n return v\n\n @validator(\"id\")\n def validate_id(cls, v):\n if not v > 0:\n return ValueError(v)\n return v\n\n\nclass GithubUser(BaseModel):\n id: int\n login: str\n node_id: str\n url: HttpUrl\n html_url: HttpUrl\n gists_url: HttpUrl\n starred_url: HttpUrl\n subscriptions_url: HttpUrl\n organizations_url: HttpUrl\n repos_url: HttpUrl\n events_url: HttpUrl\n received_events_url: HttpUrl\n type: str\n site_admin: bool\n name: Optional[str]\n company: Optional[str]\n # ...","repo_name":"arrrrrmin/fastapi-github-oauth","sub_path":"app/models/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"11726498649","text":"import cv2\nimport os\nimport re\n\nfrom django.db import transaction\nfrom django.http import HttpResponse\n\nfrom books.models import Video, VIDEO_CHOICE_SERIES, VIDEO_CHOICE_MOVIE, VIDEO_CHOICE_MUSIC\n\nVALID_EXTENTIONS = ('.avi', '.mkv', '.mp4')\nINVALID_EXTENTIONS = ('.srt', '.sub', '.jpg', '.srtx', '.nfo', '.db', '.idx', '.doc')\n\n\n@transaction.atomic\ndef video_import(request):\n print()\n import_video_series()\n\n import_doctor_who_new()\n import_doctor_who_original()\n import_doctor_who_specials()\n\n import_video_movies()\n import_video_imdb_top100()\n\n import_video_music()\n return HttpResponse(status=201, content=\"OK\")\n\n\ndef import_video_series():\n Video.objects.filter(type=VIDEO_CHOICE_SERIES).delete()\n base_path = \"E:\\\\Series\"\n for series in os.listdir(base_path):\n print(\".\", end='')\n if series == \"Doctor Who\":\n continue\n seasons = []\n for entry in os.scandir(f\"{base_path}\\{series}\"):\n if entry.is_file():\n continue\n try:\n season = int(re.compile('\\d+').findall(entry.name)[0])\n seasons.append(season)\n except:\n pass\n Video(\n type=VIDEO_CHOICE_SERIES,\n series=series,\n seasons=\", \".join(get_season_ranges(seasons))\n ).save()\n print(\"SERIES READY\")\n\n\ndef import_doctor_who_new():\n seasons = []\n for entry in os.scandir(\"E:\\\\Series\\\\Doctor Who\\\\New Series (2005 - )\"):\n print(\".\", end='')\n try:\n season = int(re.compile('\\d+').findall(entry.name)[0])\n seasons.append(season)\n except:\n pass\n if len(seasons) > 0:\n Video(\n type=VIDEO_CHOICE_SERIES,\n series=\"Dr Who\",\n title=\"New series\",\n seasons=\", \".join(get_season_ranges(seasons))\n ).save()\n print(\"DR WHO NEW READY\")\n\n\ndef 
import_doctor_who_original():\n for entry in os.scandir(\"E:\\\\Series\\\\Doctor Who\\\\Original Series (1962 - 1989)\"):\n print(\".\", end='')\n Video(\n type=VIDEO_CHOICE_SERIES,\n series=\"Dr Who\",\n title=\"Original series\",\n seasons=entry.name\n ).save()\n print(\"DR WHO ORIGINALS READY\")\n\n\ndef import_doctor_who_specials():\n for entry in os.scandir(\"E:\\\\Series\\\\Doctor Who\\\\Specials\"):\n print(\".\", end='')\n if entry.is_file() and has_valid_extention(entry.name):\n Video(\n type=VIDEO_CHOICE_SERIES,\n series=\"Dr Who\",\n title=entry.name,\n ).save()\n print(\"DR WHO SPECIALS READY\")\n\n\ndef import_video_movies():\n Video.objects.filter(type=VIDEO_CHOICE_MOVIE).delete()\n base_path = \"E:\\\\Movies\"\n for movie in os.listdir(base_path):\n print(\".\", end='')\n get_movies(VIDEO_CHOICE_MOVIE, base_path, movie)\n print(\"MOVIES READY\")\n\n\ndef import_video_imdb_top100():\n base_path = \"E:\\\\IMDB Top 100\"\n for movie in os.listdir(base_path):\n print(\".\", end='')\n get_movies(VIDEO_CHOICE_MOVIE, base_path, movie, \"IMDB Top 100\")\n print(\"IMDB TOP 100 READY\")\n\n\ndef import_video_music():\n Video.objects.filter(type=VIDEO_CHOICE_MUSIC).delete()\n base_path = \"E:\\\\MusicVideo\"\n for movies in os.listdir(base_path):\n print(\".\", end='')\n get_movies(VIDEO_CHOICE_MUSIC, f\"{base_path}\\{movies}\", \"\", movies)\n print(\"MUSIC READY\")\n\n\ndef get_movies(type, base_path, movie, series=None):\n for entry in os.scandir(f\"{base_path}\\{movie}\"):\n try:\n if not entry.is_file():\n print(f'Warning: Found extra folder for \"{movie}\"')\n continue\n if has_valid_extention(entry.name):\n vid = cv2.VideoCapture(entry.path)\n width = vid.get(cv2.CAP_PROP_FRAME_WIDTH)\n Video(\n type=type,\n title=entry.name,\n series=series if series else movie,\n seasons=\"\",\n screen_width=width\n ).save()\n except Exception as e:\n print(f'Error for \"{entry.name}\": {e}')\n\n\ndef has_valid_extention(name):\n i = name.rfind(\".\")\n if i < 0:\n return False\n extention = name[i:]\n if extention not in VALID_EXTENTIONS + INVALID_EXTENTIONS:\n print(f'Warning, invalid extention: \"{name}\"')\n return False\n return extention in VALID_EXTENTIONS\n\n\ndef get_season_ranges(seasons):\n \"\"\"\n seasons: List of integers\n \"\"\"\n ranges = []\n range_low = 0\n range_high = 0\n while len(seasons) > 0:\n season = seasons[0]\n seasons.pop(0)\n if range_low == 0:\n range_low = season\n range_high = season\n else:\n if season == range_high + 1:\n range_high += 1\n else:\n if range_high > range_low:\n ranges.append(f\"{range_low}-{range_high}\")\n else:\n ranges.append(f\"{range_low}\")\n range_low = season\n range_high = season\n if range_low > 0:\n if range_high > range_low:\n ranges.append(f\"{range_low}-{range_high}\")\n else:\n ranges.append(f\"{range_low}\")\n return ranges\n","repo_name":"AlexKwakIT/BooksDjango","sub_path":"books/import_video.py","file_name":"import_video.py","file_ext":"py","file_size_in_byte":5312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"73985867570","text":"from diem import testnet, LocalAccount\nfrom diem.testing.miniwallet import AppConfig, ServerConfig\nfrom diem.testing.suites import envs\nfrom typing import Optional, TextIO\nimport logging, click, functools, pytest, os, sys, re, json\n\nlog_format: str = \"%(name)s [%(asctime)s] %(levelname)s: %(message)s\"\nclick.option = functools.partial(click.option, show_default=True) # pyre-ignore\n\n\ndef set_env(name: str, is_io: bool = 
False): # pyre-ignore\n def callback(_c, _p, val): # pyre-ignore\n if val:\n os.environ[name] = val.read() if is_io else str(val)\n return val\n\n return callback\n\n\n@click.group()\ndef main() -> None:\n pass\n\n\n@click.command()\n@click.option(\"--name\", \"-n\", default=\"mini-wallet\", help=\"Application name.\")\n@click.option(\"--host\", \"-h\", default=\"localhost\", help=\"Start server host.\")\n@click.option(\"--port\", \"-p\", default=8888, help=\"Start server port.\")\n@click.option(\"--jsonrpc\", \"-j\", default=testnet.JSON_RPC_URL, help=\"Diem fullnode JSON-RPC URL.\")\n@click.option(\"--faucet\", \"-f\", default=testnet.FAUCET_URL, help=\"Testnet faucet URL.\")\n@click.option(\"--enable-debug-api\", \"-o\", default=True, help=\"Enable debug API.\", type=bool, is_flag=True)\n@click.option(\n \"--logfile\", \"-l\", default=None, type=click.Path(), help=\"Log to a file instead of printing into console.\"\n)\n@click.option(\n \"--import-diem-account-config-file\",\n \"-i\",\n default=None,\n help=\"Import the diem account config from a file. The config file content should be JSON generated from command `gen-diem-account-config`.\",\n type=click.File(),\n)\ndef start_server(\n name: str,\n host: str,\n port: int,\n jsonrpc: str,\n faucet: str,\n enable_debug_api: bool,\n import_diem_account_config_file: Optional[TextIO],\n logfile: Optional[str],\n) -> None:\n logging.basicConfig(level=logging.INFO, format=log_format, filename=logfile)\n configure_testnet(jsonrpc, faucet)\n\n conf = AppConfig(name=name, server_conf=ServerConfig(host=host, port=port), enable_debug_api=enable_debug_api)\n if import_diem_account_config_file:\n conf.account_config = json.load(import_diem_account_config_file)\n\n print(\"Server Config: %s\" % conf)\n\n client = testnet.create_client()\n print(\"Diem chain id: %s\" % client.get_metadata().chain_id)\n\n conf.start(client).join()\n\n\n@click.command()\n@click.option(\n \"--target\",\n \"-t\",\n default=\"http://localhost:8888\",\n callback=set_env(envs.TARGET_URL),\n help=\"Target mini-wallet application URL.\",\n)\n@click.option(\n \"--stub-bind-host\",\n \"-h\",\n default=\"localhost\",\n callback=set_env(envs.DMW_STUB_BIND_HOST),\n help=\"The host the miniwallet stub server will bind to\",\n)\n@click.option(\n \"--stub-bind-port\",\n \"-p\",\n default=None,\n callback=set_env(envs.DMW_STUB_BIND_PORT),\n help=\"The port the miniwallet stub server will bind to. Random if empty.\",\n type=int,\n)\n@click.option(\n \"--stub-diem-account-base-url\",\n \"-u\",\n default=None,\n callback=set_env(envs.DMW_STUB_DIEM_ACCOUNT_BASE_URL),\n help=\"The address that will be used for offchain callbacks. Defaults to http://localhost:{random_port}\",\n)\n@click.option(\"--jsonrpc\", \"-j\", default=testnet.JSON_RPC_URL, help=\"Diem fullnode JSON-RPC URL.\")\n@click.option(\"--faucet\", \"-f\", default=testnet.FAUCET_URL, help=\"Testnet faucet URL.\")\n@click.option(\n \"--pytest-args\",\n default=\"\",\n help=\"Additional pytest arguments, split by empty space, e.g. 
`--pytest-args '-v -s'`.\",\n show_default=False,\n)\n@click.option(\n \"--test-debug-api\",\n \"-d\",\n default=False,\n is_flag=True,\n callback=set_env(envs.TEST_DEBUG_API),\n help=\"Run tests for debug APIs.\",\n type=bool,\n)\n@click.option(\n \"--logfile\", \"-l\", default=None, help=\"Log to a file instead of printing into console.\", type=click.Path()\n)\n@click.option(\"--verbose\", \"-v\", default=False, help=\"Enable verbose log output.\", type=bool, is_flag=True)\n@click.option( # pyre-ignore\n \"--import-stub-diem-account-config-file\",\n \"-i\",\n default=None,\n callback=set_env(envs.DMW_STUB_DIEM_ACCOUNT_CONFIG, is_io=True),\n help=\"Import the diem account config from a file for miniwallet stub server. The config file content should be JSON generated from command `gen-diem-account-config`.\",\n type=click.File(),\n)\ndef test(\n target: str,\n stub_bind_host: Optional[str],\n stub_bind_port: Optional[int],\n stub_diem_account_base_url: Optional[str],\n jsonrpc: str,\n faucet: str,\n pytest_args: str,\n test_debug_api: bool,\n logfile: Optional[str],\n verbose: bool,\n import_stub_diem_account_config_file: Optional[TextIO],\n) -> None:\n configure_testnet(jsonrpc, faucet)\n\n # If stub_bind_host is provided, then stub_diem_account_base_url must be as well or the test won't work\n if stub_bind_host is not None and stub_bind_host != \"localhost\" and not stub_diem_account_base_url:\n raise click.ClickException(\"--stub-diem-account-base-url is required when passing in a custom --stub-bind-host\")\n\n args = [arg for arg in re.compile(\"\\\\s+\").split(pytest_args) if arg]\n args = [\"--pyargs\", \"diem.testing.suites\"] + args\n if verbose:\n args.append(\"--log-level=INFO\")\n args.append(\"-s\")\n args.append(\"-v\")\n if logfile:\n args.append(\"--log-file=%s\" % logfile)\n\n code = pytest.main(args)\n sys.stdout.flush()\n raise SystemExit(code)\n\n\n@click.command()\ndef gen_diem_account_config() -> None:\n print(LocalAccount().to_json())\n\n\ndef configure_testnet(jsonrpc: str, faucet: str) -> None:\n testnet.JSON_RPC_URL = jsonrpc\n testnet.FAUCET_URL = faucet\n print(\"Diem JSON-RPC URL: %s\" % testnet.JSON_RPC_URL)\n print(\"Diem Testnet Faucet URL: %s\" % testnet.FAUCET_URL)\n\n\nmain.add_command(start_server)\nmain.add_command(test)\nmain.add_command(gen_diem_account_config)\n","repo_name":"palliums-developers/pypay","sub_path":"diem_client/testing/cli/click.py","file_name":"click.py","file_ext":"py","file_size_in_byte":5835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"25939137762","text":"# -------------------------------------------------------------------------- #\n# Title: Directed Weighted Graph - Python #\n# Author: Amir Gillette, Gal Koaz #\n# Course: OOP #\n# -------------------------------------------------------------------------- #\nimport collections\nimport heapq\nimport json\nimport math\nimport random\n\nimport matplotlib.pyplot as plt\nfrom typing import List, Union, Any\n\nfrom GraphAlgoInterface import GraphAlgoInterface\nfrom DiGraph import DiGraph\n\n\nclass GraphAlgo(GraphAlgoInterface):\n\n def __init__(self, graph: DiGraph = DiGraph()):\n \"\"\"\n The method initializes a given graph.\n Parameters\n ----------\n :param: graph: DiGraph\n a given graph.\n \"\"\"\n self.graph = graph\n\n def get_graph(self) -> DiGraph:\n \"\"\"\n The method returns an initialized graph.\n :return: the directed graph on which the algorithm works on.\n \"\"\"\n return self.graph\n\n def 
load_from_json(self, file_name: str) -> bool:\n \"\"\"\n Loads a graph from a json file.\n @param file_name: The path to the json file\n @returns True if the loading was successful, False o.w.\n The method loads the given file with the JSON_Operation class.\n Afterwards, the method initialize the graph with the json.\n @returns true if the method successfully read the json file.\n \"\"\"\n try:\n with open(file_name) as file:\n load = json.load(file)\n self.graph = DiGraph()\n for nodes in load[\"Nodes\"]:\n if \"pos\" in nodes:\n pos = tuple(map(float, str(nodes[\"pos\"]).split(\",\")))\n else:\n pos = None\n self.graph.add_node(nodes[\"id\"], pos)\n for edge in load[\"Edges\"]:\n self.graph.add_edge(edge[\"src\"], edge[\"dest\"], edge[\"w\"])\n\n return True\n except Exception as e:\n print(e)\n return False\n finally:\n file.close()\n\n def save_to_json(self, file_name) -> bool:\n \"\"\"\n Saves the graph in JSON format to a file\n @param file_name: The path to the out file\n @return: True if the save was successful, False o.w.\n the method saves the graph as a json file in the given name.\n @returns true if the method have successfully written the json file\n \"\"\"\n try:\n with open(file_name, \"w\") as outfile:\n json.dump(self.graph, default=lambda o: o.__dict__,\n sort_keys=True, indent=4, fp=outfile)\n except Exception as e:\n print(e)\n return False\n return True\n\n def shortest_path(self, id1: int, id2: int) -> (float, list):\n \"\"\"\n The method uses Dijkstra's algorithm for finding the optimal path from src to destination.\n\n In terms of time complexity: the way we implemented the algorithm gives us time complexity\n of O(|V|+|E|*log(|V|)), because the structure we have applied is min heap, which arranges\n the nodes by their current accumulated distance.\n\n Parameters\n ----------\n :param: id1: int\n an id of a certain nodes.\n :param: id2: int\n an id of a certain nodes.\n\n Returns\n -------\n :return: float: a list of the nodes' id's in the path, and the overall distance\n :return: float: a list of the nodes' id's in the path, and the overall distance\n \"\"\"\n if id1 not in self.graph.get_all_v() or id2 not in self.graph.get_all_v():\n return None\n\n parent = {id1: -1}\n dist = {}\n for node_id in self.graph.get_all_v():\n dist[node_id] = math.inf\n dist[id1] = 0 # update id1 (src node) to be zero\n\n heap = [(0, id1)] # cost from start node,end node\n visited = set()\n while heap:\n weight, curr = heapq.heappop(heap)\n if curr in visited:\n continue\n visited.add(curr)\n\n if self.graph.all_out_edges_of_node(curr) is None:\n continue\n for key, w in self.graph.all_out_edges_of_node(curr).items():\n if key in visited:\n continue\n curr_w = dist[curr] + w\n if dist[key] > curr_w:\n dist[key] = curr_w\n parent[key] = curr\n heapq.heappush(heap, (dist[key], key))\n if curr == id2:\n break\n\n path = []\n curr_parent = id2\n if dist[id2] != math.inf:\n while curr_parent != -1:\n path.insert(0, curr_parent)\n curr_parent = parent[curr_parent]\n\n return dist[id2], path\n\n def dijkstra(self, id1: int) -> dict[Union[int, Any], Union[Union[float, int], Any]]:\n \"\"\"\n dijkstra algorithm, exactly as the shortest path as the above,\n but instead returns the dist dict.\n Parameters\n ----------\n :param id1: int\n an id of a certain node (the src node)\n\n :return: the dist dict with all shortest paths from src node,\n to any other node.\n \"\"\"\n dist = {}\n for node_id in self.graph.get_all_v():\n dist[node_id] = math.inf\n dist[id1] = 0 # update id1 (src node) to be 
zero\n\n heap = [(0, id1)] # cost from start node,end node\n visited = set()\n while heap:\n (weight, curr) = heapq.heappop(heap)\n if curr in visited:\n continue\n visited.add(curr)\n if self.graph.all_out_edges_of_node(curr) is None:\n continue\n for key, w in self.graph.all_out_edges_of_node(curr).items():\n if key in visited:\n continue\n curr_w = dist[curr] + w\n if dist[key] > curr_w:\n dist[key] = curr_w\n heapq.heappush(heap, (dist[key], key))\n return dist\n\n def TSP(self, node_lst: List[int]) -> (List[int], float):\n \"\"\"\n DEFINITION TSP problem. By given a complete graph with weighted edges,\n what is the Hamiltonian cycle (the path that visits all every node once) of minimum cost.\n\n The idea: this method takes out the first couple of nodes, finds the shortest path\n between each pair, but we are wisely checking if a new node to be checked\n is already in the list, if so it skips on him. This wisely step in the algorithm\n prevent redundant passing over the same paths.\n\n In addition, the method also calculates the path cost, without even\n calling an additional method, or iterating again over the path, which cost a lot of time since\n python has high cost for calling a function.\n\n Time complexity. in terms of time complexity, the algorithm checks each\n ordered pairs, but actually it passes every city only once and apply Dijkstra's algorithm.\n Let us mark the cities as c then it takes O(c * (|E|+|E|log|V|)).\n\n Parameters\n ----------\n :param: node_lst: List[int]\n the cities to go through.\n\n Returns\n -------\n :return: a list of the nodes' id's in the path, and the overall distance\n \"\"\"\n if len(node_lst) <= 1:\n return node_lst, 0\n\n path_cost = 0\n cities = node_lst\n ans = []\n src = cities.pop(0)\n dest = cities.pop(0)\n shortest = self.shortest_path(src, dest)\n ans += shortest[1]\n path_cost += shortest[0]\n\n while cities:\n curr_city = cities.pop(0)\n if ans.__contains__(curr_city):\n continue\n last = ans.pop(len(ans) - 1)\n curr = self.shortest_path(last, curr_city)\n ans += curr[1]\n path_cost += curr[0]\n return ans, path_cost\n\n def path_cost(self, node_lst: List[int]) -> float:\n \"\"\"\n The method calculates the path's cost, meaning that it goes over each pair\n and add its weight to the total weight.\n\n Parameters\n ----------\n :param: node_lst: List[int]\n the path to go through.\n\n Returns\n -------\n :return: float: cost of the given path.\n \"\"\"\n s = 0\n for i in range(0, len(node_lst) - 1):\n s += self.graph.all_out_edges_of_node(node_lst[i])[node_lst[i + 1]]\n\n return s\n\n def isConnected(self) -> bool:\n \"\"\"\n The method checks whether the graph is strongly connected, designed for checking the center method.\n The method following Kosaraju's algorithm for SCC (Strongly Connected Components).\n\n Time complexity. the method performs BFS traversal twice, once on the normal edges,\n and for the inverted graph, whereas both can be achieved in O(1) as that is\n how the graph was constructed, when the in edges of node v, is the dictionary\n for the inverted edges of the normal graph.\n\n Therefore, we will get:\n T(n) = O(2*(|E|+|V|)) = O(|E|+|V|)\n\n Returns\n -------\n :return: bool: true if it is strongly connected, o.w. 
returns false.\n \"\"\"\n root = list(self.graph.get_all_v())[0]\n return True if (self.BFS(root) and self.BFS(root, invert=True)) else False\n\n def BFS(self, id1: int, invert: bool = False) -> bool:\n \"\"\"\n The method calculates the path's cost, meaning that it goes over each pair\n and add its weight to the total weight.\n\n Parameters\n ----------\n :param: id1: int\n a source vertex id.\n\n :param: invert: bool\n a boolean flag indicates if the graph is inverted or not.\n\n Returns\n -------\n :return: bool: the method returns true if BFS succeeded to reach to any vertex, o.w. returns false.\n \"\"\"\n visited, q = set(), collections.deque([id1])\n while q:\n v = q.popleft()\n V_neighbours = self.graph.all_in_edges_of_node(v) if invert else self.graph.all_out_edges_of_node(v)\n if V_neighbours is None:\n return False\n for u in V_neighbours:\n if u not in visited:\n visited.add(u)\n q.append(u)\n return True if visited.__len__() == self.graph.v_size() else False\n\n def centerPoint(self) -> (int, float):\n \"\"\"\n This method finds the center of the graph.\n\n First, the method checks whether the graph is strongly connected or not,\n because it is only the exceptional case. in this case, immediately RETURN -> (None, inf)\n\n Graph Center DEFINITION. the graph center is the vertex to have the minimum eccentricity.\n\n In a defined formula: C = min{e(v) | for each v ∈ G(V)}\n\n This method also computes the eccentricity of a vertex v.\n\n Eccentricity DEFINITION. the eccentricity of a vertex v is the maximum distance between v towards\n each one of the other vertices.\n\n Distance DEFINITION. the distance d(v,u) is the shortest path between v and u.\n\n\n As being said the eccentricity defined as the following: e(v) = max{d(v,u) such that u ∈ V(G) for each u ∈ V(G)}.\n\n Returns\n -------\n :return: int: the center index (node id).\n :return: float: the value of the minimum distance.\n \"\"\"\n if not self.isConnected():\n return None, math.inf\n\n minimum = math.inf\n center = 0\n for v in self.graph.get_all_v():\n maximum = 0\n dist = self.dijkstra(v)\n for d in dist.values():\n if d > maximum:\n maximum = d\n e = maximum\n if e < minimum:\n minimum = e\n center = v\n return center, minimum\n\n def plot_graph(self) -> None:\n \"\"\"\n The method plots the graph.\n If the nodes have a position, the nodes will be placed there.\n Otherwise, they will be placed in a random but elegant manner.\n We Update all Node with pos if they not have we create a random\n Position, we draw the lines with the arrow of each, and we\n skip if the Edge between the vertex is null.\n @return: None\n \"\"\"\n for v in self.graph.get_all_v():\n if self.graph.vertices[v] is None:\n self.graph.vertices[v] = (random.uniform(0, 36), random.uniform(0, 33), 0.0)\n for v in self.graph.get_all_v():\n t = self.graph.vertices[v]\n x = t[0]\n y = t[1]\n plt.plot(x, y, color='blue', marker='o', markersize=8, scalex=True, scaley=True)\n if self.graph.all_out_edges_of_node(v) is not None:\n for u in self.graph.all_out_edges_of_node(v):\n t2 = self.graph.vertices[u]\n x2 = t2[0]\n y2 = t2[1]\n plt.annotate(\"\", xy=(x, y), xytext=(x2, y2),\n arrowprops=dict(arrowstyle='< -', color='red', shrinkA=7, shrinkB=7, patchA=None,\n patchB=None))\n plt.text(x, y, str(v), horizontalalignment='center',\n verticalalignment='center',\n bbox=dict(facecolor='blue', edgecolor='red', boxstyle='circle, pad=0.15'), color='white')\n plt.title(\"Directed Weighted Graph by Gal & Amir\\n\", fontsize=15)\n plt.xlabel(\"Nodes Size: ${}$, Edges 
Size: ${}$\".format(self.graph.v_size(), self.graph.e_size()),\n fontsize=13)\n plt.show()\n","repo_name":"GalKoaz/OOP-Ex3","sub_path":"src/GraphAlgo.py","file_name":"GraphAlgo.py","file_ext":"py","file_size_in_byte":13935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"22182536715","text":"# In[ ]:\n\n\n#basic import statements\nimport unittest\nimport sqlite3\nimport json\nimport os\nimport pandas as pd\nimport matplotlib as plt\nimport urllib.request\nfrom textwrap import wrap\n\n\n1\n# In[ ]:\n\n\n\ndef setUpDatabase(db_name):\n #path = os.path.dirname(os.path.abspath(__file__))\n conn = sqlite3.connect(db_name)\n cur = conn.cursor()\n return cur, conn\ncur, conn = setUpDatabase('CandidateData.db')\n\n\n# In[ ]:\n\n\n\ncandidates = [\n \n[\"Andrew Yang\", \"yang2020.com\", \"AndrewYang\"],\n[\"Kamala Harris\", \"kamalaharris.org\", \"KamalaHarris\"],\n[\"Bernie Sanders\",\"berniesanders.com\", \"BernieSanders\"],\n[\"Mike Bloomberg\", \"mikebloomberg.com\", \"MikeBloomberg\"],\n[\"Joe Biden\",\t\"joebiden.com\", \"JoeBiden\"],\n[\"Pete Buttigieg\", \"peteforamerica.com\", \"PeteButtigieg\"],\n[\"Elizabeth Warren\",\t\"elizabethwarren.com\", \"ewarren\"],\n[\"Tulsi Gabbard\", \"tulsi2020.com\", \"TulsiGabbard\"],\n[\"Amy Klobucher\", \"amyklobuchar.com\", \"amyklobuchar\"],\n[\"Tom Steyer\", \"tomsteyer.com\", \"TomSteyer\"],\n[\"Donald Trump\", \"donaldjtrump.com\",\"realDonaldTrump\"]\n]\n\ncandidate_df = pd.DataFrame(candidates)\ncandidate_df = candidate_df.rename(columns={0: \"Candidate\", 1: 'Website', 2: 'Twitter Handle'}, errors=\"raise\")\n\n\n# In[ ]:\n\n#VISUALIZATIONS\n\ncur.execute(\"SELECT * FROM WebsiteData WHERE category='Pageviews Over 1 Month'\")\ndf = pd.DataFrame(cur.fetchall())\n\ndf = df.rename(columns={1: \"Candidate\", 2: 'Website Statistic', 3: \"Pageviews Over 1 Month (per 100k)\"}, errors=\"raise\")\n\n\ndf = df.set_index(\"Candidate\")\ndf = df.iloc[:, 2:]\ndf = df * 10\n\n# cur.execute(\"SELECT * FROM Gtrend_MEAN WHERE time_period = 'two years' \")\n# df2 = pd.DataFrame(cur.fetchall())\n# df2 \n\n\n\ncur.execute(\"\"\"\n\nSELECT \n*\nFROM\n(SELECT * FROM Gtrend_MEAN WHERE time_period = 'one month') as t1\nINNER JOIN\n(SELECT * FROM Gtrend_MEAN WHERE time_period = 'three months') as t2\nON t1.name = t2.name\n\n\n\"\"\")\n\ndf2 = pd.DataFrame(cur.fetchall())[[2, 4, 9]]\n\n\ndf2 = df2.rename(columns={2: 'Candidate', 4: \"Google Trends Mean over 1 Month\", 9: \"Three Month Mean\", }, errors=\"raise\")\n\n\ndf2 = df2.set_index('Candidate')\n# df2 = df2.iloc[:, 1:]\n\n#df.iloc[1] = df.iloc[:,1] * 100\n\n\ncombined_df = df.merge(df2, left_on='Candidate', right_on='Candidate')\ncombined_df\n\npageviews_vs_gtrends = combined_df\ncorr = pageviews_vs_gtrends.corr(method='pearson')\ncorr.head()\n\nax = combined_df.iloc[:, [0, 1]].plot(kind=\"bar\", title=\"\\n\".join(wrap(\"Average Google Trend & Website Pageviews Score across Candidates | Corr {}\".format(corr.iloc[0,1]))))\ntitle = ax.set_title(\"\\n\".join(wrap(\"Average Google Trend & Website Pageviews Score across Candidates | Corr {}\".format(corr.iloc[0,1]), 60)))\n\nfig = ax.get_figure()\nfig.tight_layout()\nfig.subplots_adjust(top=0.88)\nfig.savefig('avg_gtrend_vs_pageviews.png')\n \n\n\n\n# In[ ]:\n\n\n#cur.execute(\"SELECT DISTINCT name, avg(percent) FROM DemPrimary WHERE name='Kamala Harris'\")\ncur.execute(\"SELECT name, AVG(percent) FROM DemPrimary GROUP BY name\")\ndf4 = pd.DataFrame(cur.fetchall())\n\ndf4 = df4.rename(columns={0: 
\"Candidate\", 1: 'Average Polling Percentage'}, errors=\"raise\")\n\ndf4 = df4.set_index(\"Candidate\")\ndf4\n\ncombined_df = combined_df.merge(df4, left_on='Candidate', right_on='Candidate')\ncombined_df\n\ncombined_df.loc[:, ['Google Trends Mean over 1 Month', 'Average Polling Percentage', 'Pageviews Over 1 Month (per 100k)']].plot(kind=\"bar\", title=\"Comparing Average Pageviews, Google Trends, and Polling Statistics across Candidates\")\n\ndata = combined_df.loc[:, ['Google Trends Mean over 1 Month', 'Average Polling Percentage', 'Pageviews Over 1 Month (per 100k)']]\ncorr = data.corr(method='pearson')\nprint(corr)\n\n# ax = combined_df.loc[:, ['Google Trends Mean over 1 Month', 'Average Polling Percentage', 'Pageviews Over 1 Month (per 100k)']].plot(kind=\"bar\", title=\"Comparing Average Pageviews, Google Trends, and Polling Statistics across Candidates\")\n# fig = ax.get_figure()\n# fig.tight_layout()\n# fig.subplots_adjust(top=0.88) \n# fig.savefig('avg_gtrend_vs_pageviews_vs_polling_statistics_dems.png')\n\n#ax = combined_df.loc[:, ['Google Trends Mean over 1 Month', 'Average Polling Percentage', 'Pageviews Over 1 Month (per 100k)']].plot(kind=\"bar\", title=\"Comparing Average Pageviews, Google Trends, and Polling Statistics across Candidates\")\nax = combined_df.loc[:, ['Google Trends Mean over 1 Month', 'Average Polling Percentage', 'Pageviews Over 1 Month (per 100k)']].plot(kind=\"bar\", title=\"\\n\".join(wrap(\"Comparing Average Pageviews, Google Trends, and Polling Statistics across Candidates\")))\ntitle = ax.set_title(\"\\n\".join(wrap(\"Comparing Average Pageviews, Google Trends, and Polling Statistics across Candidates\", 60)))\nfig = ax.get_figure()\nfig.tight_layout()\nfig.subplots_adjust(top=0.88)\nfig.savefig('avg_gtrend_vs_pageviews_vs_polling_statistics_dems.png')\n\n\n\n\n# In[ ]:\n\n\n\n# In[ ]:\n\n\n\n# In[ ]:\n\n\ncur.execute(\"\"\"\n\nSELECT \n*\nFROM\n(SELECT * FROM Gtrend_MEAN WHERE time_period = 'one month') as t1\nINNER JOIN\n(SELECT * FROM Gtrend_MEAN WHERE time_period = 'three months') as t2\nON t1.name = t2.name\n\n\n\"\"\")\n\ndf2 = pd.DataFrame(cur.fetchall())[[2, 4, 9]]\n\ndf2 = df2.rename(columns={2: 'Candidate', 4: \"Google Trends Mean over 1 Month\", 9: \"Three Month Mean\", }, errors=\"raise\")\ndf2 = df2.set_index('Candidate')\n\n\n# cur.execute(\"SELECT * FROM Gtrend_MEAN\")\n# results = cur.fetchall() \n\n# df = pd.DataFrame(results)\n\n# df = df.set_index(1)\n# df = df.iloc[:, 1:]\n# df = df.rename(columns={2: \"One Month Mean\", 3: \"Three Month Mean\", 4: \"One Day Mean\", 5: \"Seven Day Mean\", 6: \"Five Year Mean\"}, errors=\"raise\")\n# df\n\ndf2\n\n# In[ ]:\n\n\n#one month delta\ndf2.plot(kind=\"bar\", title=\"Average Google Trend Score across Candidates\")\nax = df2.plot(kind=\"bar\", title=\"Average Google Trend Score across Candidates\")\nfig = ax.get_figure()\nfig.tight_layout()\nfig.subplots_adjust(top=0.88)\nfig.savefig('avg_gtrend_all_candidates.png')\n\n\n\n# In[ ]:\n\n\ncur.execute(\"\"\"\n\nSELECT \n*\nFROM\n(SELECT * FROM Gtrend_DELTA WHERE time_period = 'one month') as t1\nINNER JOIN\n(SELECT * FROM Gtrend_DELTA WHERE time_period = 'three months') as t2\nON t1.name = t2.name\n\n\n\"\"\")\n\ndf3 = pd.DataFrame(cur.fetchall())[[2, 4, 9]]\n\ndf3 = df3.rename(columns={2: 'Candidate', 4: \"1 Month Delta\", 9: \"3 Month Delta\", }, errors=\"raise\")\ndf3 = df3.set_index('Candidate')\n\n\n\n# In[ ]:\ndf3.plot(kind=\"bar\", title=\"Delta in Google Trend Searches across Candidates\")\nax = df3.plot(kind=\"bar\", title=\"Delta in 
Google Trend Searches across Candidates\")\nfig = ax.get_figure()\nfig.tight_layout()\nfig.subplots_adjust(top=0.88)\nfig.savefig('delta_gtrend_all_candidates.png')\n\n\n\n# %%\n\n#BONUS API & CALCULCATION & PRINT OUTPUT\ncur.execute('DROP TABLE IF EXISTS twitter_table')\n\ncur.execute(\"CREATE TABLE twitter_table (name TEXT, followers REAL)\")\n\n\n\nimport tweepy\nimport numpy as np\n\nconsumer_key = \"nDbRA7vy5j0nxhBtZloHn7xee\"\nconsumer_secret = \"aIEVKKPeFjbjqZoyjRObUoMSsTcTXzxfzrBWr4g8T9coIUyNev\"\naccess_token = \"703356714-BaFvjE2YkCCB5zJkyFZCCNDTfnmWnwSVKEXh7cXR\"\naccess_token_secret = \"N75fvl0rPehgtO7v5oNZcdI3GAUzqGMqhnmcMVW3ttWGM\"\n\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth)\n\n#candidate_df = candidate_df.set_index(\"Candidate\")\ncandidate_df['Twitter Followers'] = np.nan\n\nfor index, val in candidate_df[\"Twitter Handle\"].iteritems():\n user = api.get_user(val)\n candidate_df['Twitter Followers'][index] = user.followers_count / 1000000\n cur.execute(\"INSERT INTO twitter_table (name, followers) VALUES (?,?)\",(candidate_df[\"Candidate\"][index], user.followers_count / 1000000))\n #print(candidate_df[\"Candidate\"][index])\nconn.commit()\n\n# %%\n\ncombined_df = combined_df.merge(candidate_df, left_on='Candidate', right_on=\"Candidate\")\n\ncombined_df = combined_df.set_index(\"Candidate\")\ncombined_df.loc[:, ['Average Polling Percentage', 'Twitter Followers']]\n\n# %%\n\n# %%\ndata2 = combined_df.loc[:, ['Average Polling Percentage', 'Twitter Followers']]\ncorr = data2.corr(method='pearson')\ncorr\n\n# %%\nax = combined_df.loc[:, ['Average Polling Percentage', 'Twitter Followers']].plot(kind=\"bar\", title=\"Average Polling Percentages vs Twitter Followers (per million) | Corr: {}\".format(corr.iloc[1, 0]))\nfig = ax.get_figure()\nfig.tight_layout()\nfig.subplots_adjust(top=0.88)\nfig.savefig('twitter_vs_polling.png')\n\n\n# %%\n\n#final to csv\ncorr = combined_df.iloc[:, [0,1, 2, 3, 6]].corr(method='pearson')\ncorr.to_csv(r'correlations.csv')\n\n# %%\n\n\n# %%\n\n\n\n# %%\n\n\n# %%\n","repo_name":"amulyaparmar/amazing-206-project","sub_path":"gtrends_voting_web_visualizations.py","file_name":"gtrends_voting_web_visualizations.py","file_ext":"py","file_size_in_byte":8372,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"70259790449","text":"from hyperopt import STATUS_OK, hp, tpe, Trials, fmin\nimport os\nfrom gan_thesis.models.general.utils import save_json, HiddenPrints\n\n\ndef optimize(space, file_path=None, max_evals=5):\n if space.get('model') == 'ctgan':\n from gan_thesis.models.ctgan.synthesizer import build_and_train, sampler, optim_loss\n elif space.get('model') == 'tgan':\n from gan_thesis.models.tgan.synthesizer import build_and_train, sampler, optim_loss\n # elif space.get('model') == 'wgan':\n # from gan_thesis.models.wgan.synthesizer import build_and_train, sampler, optim_loss\n\n def objective(params):\n \"\"\"Objective function for GAN Hyperparameter Tuning\"\"\"\n\n # with HiddenPrints(): # Suppresses normal print functions\n my_gan = build_and_train(params)\n samples = sampler(my_gan, params)\n loss = optim_loss(samples.data, params)\n\n params['loss'] = loss\n # save_json(params, os.path.join(__file__, ))\n\n del my_gan, samples\n\n # Dictionary with information for evaluation\n return {'loss': loss, 'params': params, 'status': STATUS_OK}\n\n # Trials object to track progress\n bayes_trials 
= Trials()\n\n best = fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=max_evals)\n if file_path is not None:\n save_json(best, file_path)\n\n return best, bayes_trials\n","repo_name":"TVSjoberg/gan-thesis","sub_path":"gan_thesis/models/general/optimization.py","file_name":"optimization.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"20"} +{"seq_id":"27634504889","text":"from enum import Enum\n\nclass State(Enum):\n NUM1 = 0\n OP = 1\n NUM2 = 2\n\ndef calculate(op, n1, n2, indent):\n print(indent,\"calculate(\",op,n1,n2,\")\",end='')\n ret = 0\n if op == '+':\n ret = n1 + n2\n elif op == '*':\n ret = n1 * n2\n else:\n raise (Exception(\"huh? unexpected operation\", op))\n print(\" ->\",ret)\n return ret\n\ndef evaluate(qpart, input, indent):\n total = 0\n state = State.NUM1\n acc = 0\n op = None\n\n token = ' '\n while token != None:\n token = next(input, None)\n # print(indent,\"raw t\",token)\n if token == None:\n break\n # print(indent,\"state\",state,\"token\",token,\"acc\",acc,\"op\",op)\n if state == State.NUM1:\n if token == '(':\n acc = evaluate(qpart, input, indent)\n state = State.OP\n elif token == ')':\n print(indent, \"returning\", acc)\n return acc\n else:\n acc = token\n state = State.OP\n elif state == State.OP:\n if token == '+' or token == '*':\n op = token\n state = State.NUM2\n elif token == ')':\n print(indent, \"returning\", acc)\n return acc\n elif state == State.NUM2:\n if token == '(':\n acc = calculate(op, acc, evaluate(qpart, input,indent+' '), indent)\n op = None\n state = State.OP\n elif token == ')':\n acc = calculate(op, acc, token, indent)\n print(indent, \"returning\", acc)\n return acc\n else:\n acc = calculate(op, acc, token, indent)\n state = State.OP\n\n # print(indent,\"at end state:\",state,\"c\",token,\"acc\",acc,\"op\",op)\n # print(indent,\"returning\",acc)\n return acc\n\ndef next_token(line):\n buf = 0\n for c in line:\n if c.isdigit():\n buf = (buf * 10) + int(c)\n else:\n if buf > 0:\n yield buf\n buf = 0\n if c != ' ':\n yield c\n if buf > 0:\n yield buf\n\ndef solve(qpart, filename='input.txt', gens=6):\n print(\"Part \" + str(qpart))\n with open(filename, 'r') as f:\n lines = f.read().splitlines()\n\n result = 0\n for line in lines:\n line = line.strip()\n input = next_token(line)\n print(\"in\",input)\n answer = evaluate(qpart, input, '')\n print(\"line value:\",answer)\n result += answer\n return result\n\nif __name__ == '__main__':\n # answer = solve(1, \"test1.txt\")\n # answer = solve(1, \"test2.txt\")\n answer = solve(1, \"test3.txt\")\n # answer = solve(1, \"test4.txt\")\n # answer = solve(1)\n # print(\"answer:\", answer)\n\n # answer = solve(2, \"test1.txt\")\n # answer = solve(2, \"test2.txt\")\n # answer = solve(2, \"test3.txt\")\n # answer = solve(2, \"test4.txt\")\n # answer = solve(2)\n print(\"answer:\", answer)\n","repo_name":"efficacy/aoc2020","sub_path":"day18/day18.py","file_name":"day18.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"27122229444","text":"# HTTP protocol 을 통해서 서버에 요청을 보내면 서버는 그 요청에 맞게 응답을 해주는데 요청에 포함되는 것 중에 HTTP method 라는 것이 있음.\n# 여기에는 몇 가지가 있는데 보편적으로 많이 사용 하는 get, post 방식\n# get : 어떤 내용을 누구나 볼 수 있게 url 에 적어서 보내는 방식. 한번 전송할 때 보낼 수 있는 데이터 양이 제한되어 있음 \n# https://www.coupang.com/np/search?minPrice=1000&maxPrice=100000&page=1\n# ? 뒤에 있는 것들로 부터 변수와 값 \n# post : HTTP Message Body 에 숨겨서 보내는 방식. 
보안 데이터를 보낼 때 사용. 제한이 없기 때문에 큰 데이터나 파일 업로드 같은 걸 보낼 수 있음\n# https://www.coupang.com/np/search?id=nadocoing&pw=1234\n# 위와 같으면 보안이 낮아서 위험에 노출 될 가능성이 큼\n\nimport requests\nimport re\nfrom bs4 import BeautifulSoup\n\nurl = \"https://www.coupang.com/np/search?q=%EB%85%B8%ED%8A%B8%EB%B6%81&channel=user&component=&eventCategory=SRP&trcid=&traid=&sorter=scoreDesc&minPrice=&maxPrice=&priceRange=&filterType=&listSize=36&filter=&isPriceRange=false&brand=&offerCondition=&rating=0&page=1&rocketAll=false&searchIndexingToken=&backgroundColor=\"\nheaders = {\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36\"}\nres = requests.get(url, headers=headers)\nres.raise_for_status()\n\nsoup = BeautifulSoup(res.text, \"lxml\")\n\n# print(res.text)\n\nitems = soup.find_all(\"li\", attrs={\"class\":re.compile(\"^search-product\")})\n# print(items[0].find(\"div\", attrs={\"class\":\"name\"}).get_text())\nfor item in items:\n\n # 광고 제품은 제외\n ad_badge = item.find(\"span\", attrs={\"class\":\"ad-badge-text\"})\n if ad_badge:\n print(\" <광고 상품 제외합니다>\")\n continue\n\n name = item.find(\"div\", attrs={\"class\":\"name\"}).get_text() # 제품명\n \n # 애플 제품 제외\n if \"Apple\" in name:\n print(\" \")\n continue\n\n price = item.find(\"strong\", attrs={\"class\":\"price-value\"}).get_text() # 가격\n\n #리뷰 100개 이상, 평점 4.5 이상 되는 것만 조회\n rate = item.find(\"em\", attrs={\"class\":\"rating\"}) # 평점\n if rate:\n rate = rate.get_text()\n else:\n # rate = \"평점 없음\"\n print(\" <평점 없는 상품 제외합니다>\")\n continue\n\n rate_cnt = item.find(\"span\", attrs={\"class\":\"rating-total-count\"}) # 평점 수\n if rate_cnt:\n rate_cnt = rate_cnt.get_text() # 출력 예: (26)\n rate_cnt = rate_cnt[1:-1] # 1 번째 인덱스 부터 -1 인덱스 까지만\n print(\"리뷰 수\", rate_cnt)\n else:\n # rate_cnt = \"평점 수 없음\"\n print(\" <평점 수 없는 상품 제외합니다>\")\n continue\n \n if float(rate) >= 4.5 and int(rate_cnt) >= 100:\n print(name, \"/\", price, \"원 /\", rate, \"/\", rate_cnt)\n","repo_name":"hoonie96/nadocoding","sub_path":"4_webscraping_basic/9_bs4_coupang.py","file_name":"9_bs4_coupang.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"14908058396","text":"import os\nimport json\nfrom pathlib import Path\nfrom scipy.misc import imread\n\n\ntrain_13_txt = Path('./data/SUNRGBD_train13.txt')\ntest_13_txt = Path('./data/SUNRGBD_test13.txt')\ntrain_37_txt = Path('./data/SUNRGBD_train37.txt')\ntest_37_txt = Path('./data/SUNRGBD_test37.txt')\n\ntrain_13_odgt = Path('./data/train_13_SUNRGBD.odgt')\ntest_13_odgt = Path('./data/test_13_SUNRGBD.odgt')\ntrain_37_odgt = Path('./data/train_37_SUNRGBD.odgt')\ntest_37_odgt = Path('./data/test_37_SUNRGBD.odgt')\n\n\ndef make_SUNRGBD(txt, odgt):\n with txt.open(mode='r') as fi:\n with odgt.open(mode='w+') as fo:\n lines = [line.rstrip('\\n').split(' ') for line in fi]\n for l in lines:\n img = imread('./data/'+l[0], mode='RGB')\n item = {\"width\": img.shape[1], \"fpath_img\": l[0], \"height\": img.shape[0], \"fpath_segm\": l[2]}\n fo.write(f'{json.dumps(item)}\\n')\n\n\ndef main():\n make_SUNRGBD(train_13_txt, train_13_odgt)\n make_SUNRGBD(test_13_txt, test_13_odgt)\n make_SUNRGBD(train_37_txt, train_37_odgt)\n make_SUNRGBD(test_37_txt, test_37_odgt)\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"kashyap7x/QGN","sub_path":"make_SUNRGBD.py","file_name":"make_SUNRGBD.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"20"} +{"seq_id":"25086200638","text":"#!/usr/bin/env python3\n# -*- encoding: utf-8 -*-\n'''\n@File : demo8.py\n@Time : 2020/10/05 17:10:48\n@Author : Chenghou Wang\n@Contact : wch162@mail.ustc.edu.cn\n@Version : 0.1\n@License : Apache License Version 2.0, January 2004\n@Desc : None\n'''\n\nimport sys\n\ndef fibonacci(n):\n a, b, count = 0, 1, 0\n while True:\n if count > n:\n return\n yield a\n a, b = b, a + b\n count += 1\n\nif __name__ == \"__main__\":\n f = fibonacci(10)\n try:\n print(next(f))\n except StopIteration:\n sys.exit()","repo_name":"qiforan/CSNOTE","sub_path":"notebook/python/src/multi_task/demo8.py","file_name":"demo8.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"7395856797","text":"def get_filter_query(filters, media):\n from dashboard.services.tiles import Tile\n if not filters:\n return None\n else:\n filter_field_mapping = {\n 'star': 'media__stars__name', 'director': 'media__directors__name'\n }\n filter_fields = {}\n for _filter in filters:\n if _filter['value']:\n filter_fields[filter_field_mapping[_filter['name']]] = _filter['value']\n return Tile.get_media_mapping()[media]['model'].objects.filter(**filter_fields)\n","repo_name":"tanmayag8958/movie-fan","sub_path":"backend/dashboard/services/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"20615985626","text":"from django.conf import settings\nfrom django.db.models.signals import m2m_changed\nfrom django.dispatch import receiver\nfrom django.template.loader import render_to_string\nfrom django.core.mail import EmailMultiAlternatives\n\nfrom .models import PostCategory\n\n\ndef send_mails(preview, pk, headline, subscribers_emails):\n # указываем какой шаблон брать за основу и преобразовываем его в строку для отправки подписчику\n html_context = render_to_string(\n 'mail/post_add_email.html',\n {\n 'content': preview,\n 'link': f'{settings.SITE_URL}/news/{pk}'\n }\n )\n\n msg = EmailMultiAlternatives(\n # тема письма\n subject=headline,\n # тело пустое, потому что мы используем шаблон\n body='',\n # адрес отправителя\n from_email=settings.DEFAULT_FROM_EMAIL,\n # список адресатов\n to=subscribers_emails,\n )\n\n msg.attach_alternative(html_context, 'text/html')\n msg.send(fail_silently=False)\n\n\n@receiver(m2m_changed, sender=PostCategory)\ndef post_add_notification(sender, instance, **kwargs):\n if kwargs['action'] == 'post_add':\n categories = instance.category.all()\n subscribers = []\n subscribers_emails = []\n for category in categories:\n subscribers += category.subscribers.all()\n\n for s in subscribers:\n subscribers_emails.append(s.email)\n\n send_mails(instance.preview(), instance.pk, instance.headline, subscribers_emails)\n","repo_name":"DmitriiShilkin/Skillfactory","sub_path":"4 NewsPortal/news/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"13680334321","text":"from mucinfogram import (\n DataSource,\n Parser\n)\nfrom prowl import Prowl\nimport config\n\ndata = 
DataSource.get_infogram_content()\nincidence = Parser.find_incidence(data)\nupdated = Parser.find_updated(data)\n\nprint('Incident from {} is {}'.format(updated, incidence))\n\nwith open('last_updated.txt', 'a+') as text_file:\n text_file.seek(0)\n if text_file.read() != updated:\n prowl = Prowl(config.prowlApiKey)\n prowStatus = prowl.send_notification(\n 'Neue 7-Tage-Inzidenz von {} für München. Stand: {}'.format(incidence, updated))\n print('Sent notification with status {}'.format(prowStatus))\n\n text_file.seek(0)\n text_file.truncate()\n text_file.write(updated)\n else:\n print('Data not updated. No notification sent')\n","repo_name":"nicolashohm/coronaStatsNotifier","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"71028830130","text":"#!/usr/bin/env python\nf1=open('/Users/gtc/Desktop/work/queue/contacts.dat')\nlines=f1.readlines()\nf1.close()\ndata=[]\nsnode=[]\ndnode=[]\nstarttime=[]\nendtime=[]\nfreq=[]\nfor line in lines:\n vector=line.split('\\t')\n snode.append(int(vector[0]))\n dnode.append(int(vector[1]))\n starttime.append(int(vector[2]))\n endtime.append(int(vector[3]))\n freq.append(int(vector[4]))\nallstart=min(starttime)\nallend=max(endtime)\ntotaltime=float(allend-allstart)\nnodelabel=[]\nallnode=snode+dnode\nallnode.sort()\nfor num in allnode:\n if num in nodelabel:\n continue\n else:\n if num>max(snode):\n break\n nodelabel.append(num)\nmu=[]\nf2=open('/Users/gtc/Desktop/work/queue/mu.txt','w')\ntotalmeet=0.0\nfor node1 in range(1,len(nodelabel)+1):\n mumed=[]\n for node2 in range(1,len(nodelabel)+1):\n meet=0\n for slabel in range(len(snode)):\n if snode[slabel]==node1:\n if dnode[slabel]==node2:\n meet+=1\n if snode[slabel]==node2:\n if dnode[slabel]==node1:\n meet+=1\n totalmeet+=meet\n if meet==0:\n mumed2=1e5\n else:\n mumed2=totaltime/meet\n if node1==node2:\n mumed2=0.0\n mumed.append(mumed2)\n f2.write(str(mumed2))\n f2.write(' ')\n mu.append(mumed)\n f2.write('\\n')\nf2.close()\nprint(totalmeet/totaltime)\n","repo_name":"tianchonggao/spider1","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"28683946517","text":"from django.db import models\nfrom django.utils import timezone\nfrom ctypes import *\n\nclass Cliente(models.Model):\n nome = models.CharField(max_length=200, null=False)\n sobrenome = models.CharField(max_length=200, null=False)\n telefone = models.CharField(max_length=200, null=False)\n email = models.CharField(max_length=200)\n data_cadastro = models.DateTimeField(default=timezone.now, null=False)\n\n def salvar(self):\n self.data_cadastro = timezone.now()\n self.save()\n\n def __str__(self):\n return self.nome\n\n def ident(self):\n ident = str(self.id)\n return ident\n ident.short_description = 'ID'\n\nclass Funcionario(models.Model):\n nome = models.CharField(max_length=200, null=False)\n sobrenome = models.CharField(max_length=200, null=False)\n telefone = models.CharField(max_length=200, null=False)\n email = models.CharField(max_length=200)\n data_admissao = models.DateTimeField(default=timezone.now, null=False)\n data_demissao = models.DateTimeField(blank=True, null=True)\n\n def salvar(self):\n self.data_admissao = timezone.now()\n self.save()\n\n def __str__(self):\n return self.nome\n\n def ident(self):\n ident = str(self.id)\n return ident\n 
ident.short_description = 'ID'\n\nclass Servico(models.Model):\n nome = models.CharField(max_length=200, null=False)\n descricao = models.TextField()\n valor = models.IntegerField(null=False)\n\n def __str__(self):\n return self.nome\n\n def ident(self):\n ident = str(self.id)\n return ident\n ident.short_description = 'ID'\n\nclass Historico(models.Model):\n cliente = models.ForeignKey(Cliente)\n funcionario = models.ForeignKey(Funcionario)\n servicos = models.ManyToManyField(Servico)\n data_chamado = models.DateTimeField(default=timezone.now, null=False)\n data_realizacao = models.DateTimeField(blank=True, null=True)\n observacao = models.TextField()\n\n def salvar(self):\n self.data_chamado = timezone.now()\n self.save()\n\n def ident(self):\n ident = str(self.id)\n return ident\n ident.short_description = 'ID'\n\n def __str__(self):\n ident = str(self.id)\n return ident\n\n def soma(self):\n a = c_buffer('\\000', 1)\n b = c_buffer('\\000', 4)\n lib = cdll.LoadLibrary(\"c:\\lib_soma.so\")\n valor = c_double(lib.add(a,b))\n return valor.value\n\n def ident2(self):\n ident = str(self.id)\n return ident\n ident.short_description = 'ID'\n\n def valor2(self):\n soma = float(0)\n lib = cdll.LoadLibrary(\"c:\\lib_soma.so\")\n valores = self.servicos.values('valor')\n tam = len(valores)\n x= 0\n for x in range(tam):\n v = self.servicos.values().__getitem__(x).get('valor')\n soma = lib.add(float(soma),float(v))\n return soma\n\n\n def valor(self):\n soma = float(0)\n lib = cdll.LoadLibrary(\"c:\\libteste.so\")\n valores = self.servicos.values('valor')\n tam = len(valores)\n x= 0\n for x in range(tam):\n soma = float(soma) + int(self.servicos.values().__getitem__(x).get('valor'))\n self.valor_total = soma\n return soma\n\n","repo_name":"joaopaulo164/Order-Sys-ToMany","sub_path":"manager/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"8770807256","text":"# These are the signals handlers that fetch and feed old and new instance data to the python logger.\n\nfrom audit_logger.logger import AuditLogger\nfrom django.conf import settings\nfrom audit_logger.middleware import get_latest_request\n\naudit_log_format = settings.AUDIT_LOG_FORMAT\naudit_log_level = settings.AUDIT_LOG_LEVEL\n\n\naudit = AuditLogger(audit_log_format, audit_log_level)\n\n\ndef _current_user():\n request = get_latest_request()\n return request.user if hasattr(request, 'user') else None\n\n\nclass Receivers:\n old_instance = None # closure function\n old_m2m_instances = None\n\n\ndef _capture_old_instances(sender, instance):\n try:\n old = sender.objects.get(pk=instance.id)\n except sender.DoesNotExist:\n old = None\n\n def _closure():\n return old\n\n Receivers.old_instance = _closure\n\n\ndef presave(sender, instance, raw, using, update_fields, **kwargs):\n _capture_old_instances(sender, instance)\n\n\ndef predelete(sender, instance, using, **kwargs):\n _capture_old_instances(sender, instance)\n\n\ndef postdelete(sender, instance, using, **kwargs):\n request = get_latest_request()\n\n if not request:\n return ''\n\n actor = _current_user()\n old = Receivers.old_instance()\n\n try:\n new = sender.objects.get(pk=instance.id) # not expected, could be used to verify\n except sender.DoesNotExist:\n new = None # expected\n\n message = audit.log_action(actor, request, \"DELETED\", old, new)\n\n return message\n\n\ndef postsave(sender, instance, created, raw, using, update_fields, **kwargs):\n 
request = get_latest_request()\n\n    if not request:\n        return ''\n\n    actor = _current_user()\n    old = Receivers.old_instance()\n    new = sender.objects.get(pk=instance.id)\n    action = \"CREATED\" if created else \"UPDATED\"\n\n    message = audit.log_action(actor, request, action, old, new)\n\n    return message\n\n\n# https://docs.djangoproject.com/en/3.1/ref/signals/#django.db.models.signals.m2m_changed\ndef m2mchanged(sender, instance, action, reverse, model, pk_set, using, **kwargs):\n    \"\"\"Dependent upon URL structure. Ex: /applications/1/exceptions/ yields 'exceptions' as the target m2m field\"\"\"\n    actor = _current_user()\n    request = get_latest_request()\n\n    if not request:\n        return ''\n\n    if action in ['pre_add', 'post_add']:\n        m2m_field_name = request.path.split('/')[-2]\n    else:\n        m2m_field_name = request.path.split('/')[-3]\n\n    m2m_field_target = getattr(instance, m2m_field_name, None)\n\n    # guard for mismatched path (the m2m field name) in the request during testing\n    if not m2m_field_target:\n        return ''\n\n    if action in ['pre_remove', 'pre_add']:\n        # _capture_old_instances expects a (sender, instance) pair, so capture the\n        # pre-change m2m rows directly and expose them through the same closure hook.\n        old_rows = [str(row) for row in m2m_field_target.all()]\n        Receivers.old_m2m_instances = (lambda rows=old_rows: rows)\n\n    elif action in ['post_remove', 'post_add']:\n        new_m2m_instances = [str(row) for row in m2m_field_target.all()]\n        old_m2m_instances = Receivers.old_m2m_instances()\n\n        if action == 'post_remove':\n            audit.log_action(actor, request, \"DELETED\", old_m2m_instances, new_m2m_instances, m2ms=True)\n        else:\n            audit.log_action(actor, request, \"CREATED\", old_m2m_instances, new_m2m_instances, m2ms=True)\n\n","repo_name":"theTisrock/django_audit_logger","sub_path":"audit_logger/receivers.py","file_name":"receivers.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"}
{"seq_id":"72777097010","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom django.core.management.base import BaseCommand\n\nfrom simon_app.models import *\nfrom random import shuffle\nimport datetime\nimport logging\nfrom multiprocessing.dummy import Pool as ThreadPool\nfrom simon_app.functions import GMTUY\nfrom simon_app.decorators import chatty_command\nfrom random import shuffle\n\n\n# @chatty_command(command=\"HTTPS Check\")\nclass Command(BaseCommand):\n    threads = 10\n    max_job_queue_size = 200 # 0 for limitless\n    max_points = 20 # 0 for limitless\n\n    def handle(self, *args, **options):\n        def do_work(tp):\n            try:\n                current_check = tp.check_point(timeout=10, save=False, protocol=\"https\")\n                latest_https_check = tp.get_latest_https_check()\n                if latest_https_check is None or latest_https_check.status != current_check:\n                    print(latest_https_check)\n                    # persist on change\n                    check = HttpsCheck(test_point=tp, status=current_check)\n                    tp.httpscheck_set.add(check)\n\n            except Exception as e:\n                logging.error(e)\n                pass\n\n            finally:\n                return\n\n        tps = [tp for tp in SpeedtestTestPoint.objects.all()]\n        shuffle(tps)\n\n        thread_pool = ThreadPool(self.threads)\n\n        then = datetime.datetime.now(tz=GMTUY())\n\n        if self.max_points > 1:\n            tps = tps[:self.max_points]\n        elif self.max_points == 1:\n            tps = [tps[0]]\n\n        tps = list(tps)\n        shuffle(tps) # shuffle in case the script gets aborted (do not run only the small alphanumeric tps)\n\n        logging.info(\"TPs: %s \" % (len(tps)))\n        logging.info(\"Launching %.0f worker threads on a %.0f jobs queue\" % (self.threads, len(tps)))\n        thread_pool.map(do_work, tps)\n\n        thread_pool.close()\n        thread_pool.join()\n\n        logging.info(\"Command ended with %.0f worker threads on a %.0f jobs queue\" % (self.threads, 
len(tps)))\n logging.info(\"Command took %s\" % (datetime.datetime.now(tz=GMTUY()) - then))\n","repo_name":"LACNIC/simon","sub_path":"simon-web/simon_app/management/commands/https_check.py","file_name":"https_check.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"20"} +{"seq_id":"18007314999","text":"class TreeNode:\n def __init__(self, value):\n self.left = None\n self.right = None\n self.val = value\n\nclass Solution:\n def preorderTraversal(self, root):\n result = []\n def traverse(root):\n if(root == None): \n return\n #adding root to result\n result.append(root.val) \n #traversing the left subtree\n traverse(root.left) \n #traversing the right subtree\n traverse(root.right) \n traverse(root)\n return result\n\n\nif __name__ == '__main__':\n root = TreeNode(1)\n root.right = TreeNode(2)\n root.right.left = TreeNode(3)\n s = Solution()\n print(s.preorderTraversal(root))\n\n\n# Time Complexity -> O(n) -> T(n) = 2(T/2) + 1\n# Space Complexity -> O(n) ","repo_name":"mrunalpatil1198/Leetcode","sub_path":"Depth First Search/144-BinaryTreePreorderTraversal.py","file_name":"144-BinaryTreePreorderTraversal.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"28902283142","text":"from dao.AddressesDao import AddressesDao\nfrom dao.TransactionDao import TransactionDao\n\n\nclass TransactionAddressInfo:\n def __init__(self, timestamp, address, transaction_id):\n self.timestamp = timestamp\n self.address = address\n self.transaction_id = transaction_id\n\n\ndef detect_transfers_for_user(user_id):\n address_dao = AddressesDao()\n transaction_dao = TransactionDao()\n address_records = address_dao.getUserWalletAddresses(user_id)\n wallets = address_records.addresses\n transaction_dict = _build_transaction_information(transaction_dao, wallets)\n valid_transactions_ids = []\n output = []\n for wallet_address in wallets:\n wallet_transactions = transaction_dao.getTransaction(wallet_address)\n for transaction_index in range(len(wallet_transactions)):\n transaction = wallet_transactions[transaction_index]\n search_key = None\n if transaction.type == 'in':\n search_key = \"{0}-{1}-{2}\".format(transaction.amount, 'out', transaction.coin)\n else:\n search_key = \"{0}-{1}-{2}\".format(transaction.amount, 'in', transaction.coin)\n\n if search_key in transaction_dict.keys():\n # validation logic here\n\n for address_info_index in range(len(transaction_dict[search_key])):\n address_information = transaction_dict[search_key][address_info_index]\n print(address_information.address, transaction.address)\n if transaction.address != address_information.address and \\\n address_information.address not in valid_transactions_ids:\n if transaction.type == 'out':\n ## widthdraws can only happen before deposits so verify timestamps make sense\n if transaction.timestamp < address_information.timestamp:\n\n ## Success, we add it to the list. 
set the address as visited to true\n ## We also break from this loop because we already found a matching pair so it can move now to\n ## next element\n output.append((transaction.transaction_id, address_information.transaction_id))\n wallet_transactions.pop(transaction_index)\n transaction_dict[search_key].pop(address_info_index)\n break\n\n else:\n # its an deposit so its the opposite\n if transaction.timestamp > address_information.timestamp:\n ## Success!\n output.append((address_information.transaction_id, transaction.transaction_id))\n wallet_transactions.pop(transaction_index)\n transaction_dict[search_key].pop(address_info_index)\n break\n\n return output\n\n\ndef _build_transaction_information(transaction_dao, wallet_records):\n transaction_dict = dict()\n for wallet_address in wallet_records:\n wallet_transactions = transaction_dao.getTransaction(wallet_address)\n for transaction in wallet_transactions:\n amount_type_coin_key = \"{0}-{1}-{2}\".format(transaction.amount, transaction.type, transaction.coin)\n if amount_type_coin_key not in transaction_dict.keys():\n transaction_dict[amount_type_coin_key] = []\n transaction_dict[amount_type_coin_key].append(\n TransactionAddressInfo(transaction.timestamp, transaction.address, transaction.transaction_id))\n\n return transaction_dict\n","repo_name":"venappea/ct-test","sub_path":"components/TransferDetector.py","file_name":"TransferDetector.py","file_ext":"py","file_size_in_byte":3771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"13940971115","text":"import json\nfrom flask import session as login_session\nfrom flask import render_template, request, redirect, url_for\n\nfrom lib.database_setup import User, Category, Item\nfrom lib import session\n\n\ndef home():\n left_content = [item.to_dict() for item in Category.all()]\n right_content = [item.to_full_dict() for item in Item.all(10)]\n data = {\n \"right_title\": \"Last items\",\n \"left_content\": left_content,\n \"right_content\": right_content,\n \"with_parrent\": True\n }\n return render_template(\"listing.html\", data=data)\n\ndef error():\n message = request.values.get(\"message\", \"An error has occured\")\n code = request.values.get(\"code\", 500)\n return render_template(\"error.html\", message=message, code=code)\n\ndef category(category_id):\n category = Category(id=category_id).get()\n left_content = [item.to_dict() for item in Category.all()]\n right_content = [item.to_dict() for item in Item.find(category_id=category_id)]\n data = {\n \"category\": category.to_dict(),\n \"right_title\": \"{} Items ({} items)\".format(category.name, len(right_content)),\n \"left_content\": left_content,\n \"right_content\": right_content,\n \"with_parrent\": False\n }\n return render_template(\"listing.html\", data=data)\n\n\ndef item(item_id):\n item = Item(id=item_id).get()\n return render_template(\"item.html\", data=item.to_dict())\n\n\n@session.ensure_logged_in\ndef create_item(user):\n if request.method == 'POST':\n category_id = int(request.form[\"category_id\"])\n item = Item(name=request.form[\"name\"],\n description=request.form[\"description\"],\n user_id=login_session[\"user_id\"],\n category_id=category_id,\n category=Category(id=category_id).get()\n ).create()\n item.add_picture(request)\n return redirect(url_for(\"item\", item_id=item.id))\n else:\n data = {\n \"type\": \"item\",\n \"action\": \"Create\",\n \"categories\": [item.to_dict() for item in Category.all()]\n }\n return 
render_template(\"create_edit.html\", data=data)\n\n\n@session.ensure_logged_in\ndef edit_item(user, item_id):\n item = Item(id=item_id).get()\n session.ensure_permission(user, item)\n if request.method == 'POST':\n category_id = int(request.form[\"category_id\"])\n item.name=request.form[\"name\"]\n item.description=request.form[\"description\"]\n item.category_id=category_id\n item.category=Category(id=category_id).get()\n item.save()\n item.add_picture(request)\n return redirect(url_for(\"item\", item_id=item_id))\n else:\n data=item.to_dict()\n data[\"type\"] = \"item\"\n data[\"action\"] = \"Edit\"\n data[\"categories\"] = [item.to_dict() for item in Category.all()]\n return render_template(\"create_edit.html\", data=data)\n\n\n@session.ensure_logged_in\ndef delete_item(user, item_id):\n item = Item(id=item_id).get()\n session.ensure_permission(user, item)\n if request.method == 'POST':\n item.delete()\n return redirect(url_for(\"home\"))\n else:\n data=item.to_dict()\n data[\"type\"] = \"item\"\n return render_template(\"delete.html\", data=data)\n\n\n\n@session.ensure_logged_in\ndef create_category(user):\n if request.method == 'POST':\n category = Category(name=request.form[\"name\"],\n user_id=login_session[\"user_id\"]\n ).create()\n category.add_picture(request)\n return redirect(url_for(\"category\", category_id=category.id))\n else:\n data = {\n \"type\": \"category\",\n \"action\": \"Create\"\n }\n return render_template(\"create_edit.html\", data=data)\n\n\n@session.ensure_logged_in\ndef edit_category(user, category_id):\n category = Category(id=category_id).get()\n session.ensure_permission(user, category)\n if request.method == 'POST':\n category.name=request.form[\"name\"]\n category.save()\n category.add_picture(request)\n return redirect(url_for(\"category\", category_id=category_id))\n else:\n data=category.to_dict()\n data[\"type\"] = \"category\"\n data[\"action\"] = \"Edit\"\n return render_template(\"create_edit.html\", data=data)\n\n\n@session.ensure_logged_in\ndef delete_category(user, category_id):\n category = Category(id=category_id).get()\n session.ensure_permission(user, category)\n if request.method == 'POST':\n items = [item.delete() for item in Item.find(category_id=category_id)]\n category.delete()\n return redirect(url_for(\"home\"))\n else:\n data=category.to_dict()\n data[\"type\"] = \"category and all sub-items\"\n return render_template(\"delete.html\", data=data)\n\n\ndef fbconnect():\n session.connect_user_through_facebook(request)\n return \"Logged in!\"\n\ndef gconnect():\n session.connect_user_through_google(request)\n return \"Logged in!\"\n\n@session.ensure_logged_in\ndef logout(user):\n if login_session[\"provider\"] == \"google\":\n session.disconnect_user_through_google()\n else:\n session.disconnect_user_through_facebook()\n login_session.clear()\n return redirect(\"/\")\n\n\n","repo_name":"birocorneliu/item_catalog","sub_path":"paths/html.py","file_name":"html.py","file_ext":"py","file_size_in_byte":5239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"37481204066","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Dict, Optional, Set, Tuple\n\nfrom pygments.lexers import PythonLexer\nfrom rich.segment import Segment\nfrom rich.syntax import Syntax as RichSyntax\n\nif TYPE_CHECKING:\n from rich.console import Console, ConsoleOptions, RenderResult\n\n\nclass Syntax(RichSyntax):\n def __init__(\n self,\n code: str,\n line_range: Tuple[int, int],\n 
highlight_lines: Optional[Set[int]] = None,\n line_offset: int = 0,\n line_annotations: Optional[Dict[int, str]] = None,\n ) -> None:\n self.line_offset = line_offset\n self.line_annotations = line_annotations or {}\n\n super().__init__(\n code=code,\n lexer=PythonLexer(),\n line_numbers=True,\n word_wrap=False,\n theme=\"ansi_light\",\n highlight_lines=highlight_lines,\n line_range=line_range,\n )\n\n def __rich_console__(\n self, console: Console, options: ConsoleOptions\n ) -> RenderResult:\n assert self.line_range\n\n segments = self._get_syntax(console, options)\n annotations = self.line_annotations.copy()\n current_line = self.line_range[0] or 0\n\n for segment in segments:\n if segment.text == \"\\n\":\n # 3 = | + space + space\n prefix = \" \" * (self._numbers_column_width + 3)\n\n annotation = annotations.pop(current_line, None)\n\n current_line += 1\n\n if annotation:\n yield \"\"\n yield prefix + annotation\n\n continue\n\n yield segment\n\n if segment.text.strip() == str(current_line):\n yield Segment(\"| \")\n","repo_name":"strawberry-graphql/strawberry","sub_path":"strawberry/exceptions/syntax.py","file_name":"syntax.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":3532,"dataset":"github-code","pt":"20"} +{"seq_id":"36449465416","text":"#!/usr/bin/env python\n\nimport scanpy as sc\nimport tangram as tg\nimport pandas as pd\nimport sys\nimport matplotlib.pyplot as plt\nimport torch\n\n\n\nprint(sys.argv[1])\nadata_st = sc.read_visium(path=sys.argv[1])\nadata_sc= sc.read(filename=sys.argv[2])\n\nadata_sc.X=adata_sc.raw.X.copy()\n\n\ntg.pp_adatas(adata_sc,adata_st,genes=None)\n\n\nif torch.cuda.is_available():\n ad_map = tg.map_cells_to_space(\n adata_sc, \n adata_st,device=\"cuda:0\")\nelse:\n ad_map = tg.map_cells_to_space(\n adata_sc, \n adata_st)\n\n\nad_ge = tg.project_genes(ad_map, adata_sc)\n\ngenes=[\"LYVE1\",\"F13A1\",\"FOLR2\",\"SELENOP\",\"APOE\",\"SLC40A1\",\"C1QB\",\"DAB2\",\"PDK4\",\"SPP1\",\"ACP5\",\"CD9\",\"FCER1A\",\"CD1C\",\"CLEC10A\",\"HSPA6\",\"DNAJB1\",\n \"HSPA1B\",\"S100A8\",\"S100A9\",\"S100A12\",\"EREG\",\"G0S2\",\"FCN1\",\"CCL20\",\"IL1B\",\"IL23A\",\"CXCL10\",\"CXCL9\",\"GBP1\",\"CDC1C\",\"CCL3L1\",\n \"CCL3\",\"CCL4L2\",\"MT1X\",\"MT1E\",\"CTSL\",\"RGS1\",\"FOS\",\"DNASE1L3\",\"MMP9\",\"LYZ\",\"AREG\",\"VCAN\",\"HSPA1A\",\"MARCO\",\"COLEC12\"]\n\ngenes=list(set(list(ad_ge.var[\"features\"].to_numpy())) & set(genes))\ngenes=list(map(lambda x: x.lower(),genes))\n\n\nexpid=list(adata_st.uns['spatial'])[0]\n\nscale_fac=adata_st.uns['spatial'][expid]['scalefactors']['tissue_hires_scalef']\n\npl=tg.plot_genes_sc(genes, adata_measured=adata_st, adata_predicted=ad_ge, perc=0.02,scale_factor=scale_fac,spot_size=40,return_figure=True)\n\nplt.rcParams['figure.figsize'] = [15, 15]\nplt.rcParams['figure.dpi'] = 150\n\n\npl.savefig(sys.argv[3], format=\"pdf\", bbox_inches=\"tight\")\n","repo_name":"sinanugur/spatial-workflow","sub_path":"workflow/scripts/spatial-tangram-gene.py","file_name":"spatial-tangram-gene.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"17637316259","text":"import gymnasium as gym\nimport numpy as np\nfrom gymnasium.wrappers.normalize import NormalizeReward\n\nimport sinergym\nfrom sinergym.utils.wrappers import (LoggerWrapper, NormalizeAction,\n NormalizeObservation)\n\nenv = gym.make('Eplus-demo-v1')\nenv = NormalizeAction(env)\nenv = NormalizeObservation(env)\nenv = 
NormalizeReward(env)\nenv = LoggerWrapper(env)\n\nfor i in range(1):\n obs, info = env.reset()\n rewards = []\n terminated = False\n current_month = 0\n while not terminated:\n a = env.action_space.sample()\n obs, reward, terminated, truncated, info = env.step(a)\n rewards.append(reward)\n if info['month'] != current_month: # display results every month\n current_month = info['month']\n print('Reward: ', sum(rewards), info)\n print(\n 'Episode ',\n i,\n 'Mean reward: ',\n np.mean(rewards),\n 'Cumulative reward: ',\n sum(rewards))\nenv.close()\n","repo_name":"ugr-sail/sinergym","sub_path":"scripts/try_env.py","file_name":"try_env.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"20"} +{"seq_id":"71768443569","text":"class Solution(object):\n def missingNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n length = len(nums)\n numList = (length+1) * [0]\n\n for num in nums:\n numList[num] = 1\n\n # print(numList)\n\n for i in range(length+1):\n if numList[i] == 0:\n return i\n\nif __name__ == '__main__':\n nums = [3, 0, 1]\n # nums = [0, 1]\n # nums = [9, 6, 4, 2, 3, 5, 7, 0, 1]\n # nums = [0]\n solution = Solution()\n result = solution.missingNumber(nums)\n print(result)\n","repo_name":"xiaolizihahaha/LeetCode","sub_path":"268.py","file_name":"268.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"18232422353","text":"import zmq \nimport sys \nimport random\nimport time\n\nport = '5555' \npub_server_name = 'pub-server01' \ncontext = zmq.Context() \nsocket = context.socket(zmq.PUB) \nsocket.bind('tcp://*:%s'%port) \n \nwhile True: \n topic = random.randrange(9999,10005) \n messagedata = random.randrange(1,215)-80 \n print ('topic:%s messagedata:%s'%(topic,messagedata)) \n socket.send_string('%d %d %s'%(topic,messagedata,pub_server_name)) \n time.sleep(1) \n","repo_name":"lqiqiqi/self-driving","sub_path":"Zmq_PUB1.py","file_name":"Zmq_PUB1.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"4870269507","text":"#!/usr/bin/python3.5\n\n# -*- coding: utf-8 -*-\nimport requests\nimport logging\nimport argparse\nimport math\nfrom influxdb import InfluxDBClient\n\n\"\"\"Python bridge between Airly.eu API and InfluxDB.\"\"\"\n\"\"\"This code is meant to be started by crond every 10 minutes.\"\"\"\n\nparser = argparse.ArgumentParser(description='Fetches air quality data from\\\n Airly.eu sensor and pushes them into InfluxDB')\nparser.add_argument(\"--verbose\",\n help='Set verbosity level',\n choices=['DEBUG', 'INFO', 'WARNING', 'CRITICAL'],\n default='CRITICAL'\n )\nparser.add_argument('--airly_sensorid',\n help='''Airly.eu sensor ID,\n fetch it from https://developer.airly.eu/docs''',\n required=True\n )\nparser.add_argument('--airly_apikey',\n help='''Airly.eu API access token,\n get it on https://developer.airly.eu/docs''',\n required=True\n )\nparser.add_argument('--airly_url',\n help='Airly API URL',\n default='https://airapi.airly.eu/v1/sensor/measurements'\n )\nparser.add_argument('--InfluxDB_host', default='localhost')\nparser.add_argument('--InfluxDB_port', default='8086')\nparser.add_argument('--InfluxDB_user', default='root')\nparser.add_argument('--InfluxDB_password', default='root')\nparser.add_argument('--InfluxDB_database', required=True)\n\nargs = 
parser.parse_args()\nif args.verbose:\n logging.basicConfig(level=args.verbose)\n\n\ndef get_airly(airly_sensorid, airly_apikey, airly_url):\n payload = {\n 'sensorId': airly_sensorid,\n 'apikey': airly_apikey\n }\n logging.debug('Requesting JSON from ' + airly_url)\n response = requests.get(airly_url, params=payload)\n _airly_json = response.json()\n _airly_pm1 = math.floor(_airly_json['currentMeasurements']['pm1'])\n _airly_pm25 = math.floor(_airly_json['currentMeasurements']['pm25'])\n _airly_pm10 = math.floor(_airly_json['currentMeasurements']['pm10'])\n _airly_pressure = math.floor(_airly_json['currentMeasurements']['pressure'])\n _airly_humidity = math.floor(_airly_json['currentMeasurements']['humidity'])\n _airly_temperature = _airly_json['currentMeasurements']['temperature']\n logging.debug('Received PM1: ' + str(_airly_pm1))\n logging.debug('Received PM2.5: ' + str(_airly_pm25))\n logging.debug('Received PM10: ' + str(_airly_pm10))\n logging.debug('Received pressure: ' + str(_airly_pressure))\n logging.debug('Received humidity: ' + str(_airly_humidity))\n logging.debug('Received temperature: ' + str(_airly_temperature))\n\n return {\n 'PM1': _airly_pm1,\n 'PM25': _airly_pm25,\n 'PM10': _airly_pm10,\n 'pressure': _airly_pressure,\n 'temperature': _airly_temperature,\n 'humidity': _airly_humidity\n }\n\n\ndef wite_to_InfluxDB(InfluxDB_host,\n InfluxDB_port,\n InfluxDB_user,\n InfluxDB_password,\n InfluxDB_database,\n values,\n sensorId):\n client = InfluxDBClient()\n client = InfluxDBClient(\n host=InfluxDB_host,\n port=InfluxDB_port,\n username=InfluxDB_user,\n password=InfluxDB_password,\n database=InfluxDB_database\n )\n logging.debug('Connected to InfluxDB')\n json_body = [\n {\n \"measurement\": \"AQI\",\n \"tags\": {\n \"sensorId\": sensorId,\n },\n \"fields\": values\n }\n ]\n logging.debug(json_body)\n client.write_points(points=json_body, time_precision='ms')\n logging.info('Sent metrics to InfluxDB')\n\n\nvalues = get_airly(airly_sensorid=args.airly_sensorid,\n airly_apikey=args.airly_apikey,\n airly_url=args.airly_url)\nwite_to_InfluxDB(InfluxDB_host=args.InfluxDB_host,\n InfluxDB_port=args.InfluxDB_port,\n InfluxDB_user=args.InfluxDB_user,\n InfluxDB_password=args.InfluxDB_password,\n InfluxDB_database=args.InfluxDB_database,\n values=values,\n sensorId=args.airly_sensorid\n )\n","repo_name":"KrzysztofHajdamowicz/airly-to-influxdb","sub_path":"airly.py","file_name":"airly.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"36326809573","text":"import torch.nn.functional as F\r\n\r\nHTRPOconfig = {\r\n 'reward_decay': 0.98,\r\n 'cg_damping': 1e-3,\r\n 'GAE_lambda': 0.,\r\n 'max_kl_divergence': 2e-5,\r\n 'entropy_weight': 0,\r\n 'per_decision': True,\r\n 'weighted_is': True,\r\n 'using_active_goals' : True,\r\n 'hidden_layers': [32, 32, 32],\r\n 'hidden_layers_v': [32, 32, 32],\r\n 'max_grad_norm': None,\r\n 'lr_v': 5e-5,\r\n 'iters_v':10,\r\n # for comparison with HPG\r\n 'lr': 3e-5,\r\n # NEED TO FOCUS ON THESE PARAMETERS\r\n 'using_hpg': False,\r\n 'steps_per_iter': 300,\r\n 'sampled_goal_num': 16,\r\n 'value_type': None,\r\n 'using_original_data': False,\r\n 'using_kl2':True,\r\n 'policy_type':'Conv',\r\n}\r\nHTRPOconfig['memory_size'] = 
HTRPOconfig['steps_per_iter']\r\n","repo_name":"HTRPOCODES/HTRPO-v2","sub_path":"configs/HTRPO_MsPacman.py","file_name":"HTRPO_MsPacman.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"20"}
{"seq_id":"27009392012","text":"import time\r\nfrom tkinter import *\r\nroot = Tk()\r\nroot.geometry(\"960x550+0+0\")\r\nroot.title(\"Encryption application\")\r\n\r\nlabelx = Label(root, text=\"Please enter plain or encrypted text.\")\r\nlabelx.pack()\r\ninput_entry = Entry(root)\r\ninput_entry.pack()\r\nlocal = time.localtime()\r\n\r\nlabel = Label(root, text=\"encrypted text\")\r\nlabel.pack()\r\n\r\ndef sifruj():\r\n    r=0\r\n    k=format(local[0]) # gets the year\r\n    for j in k:\r\n        r+=int(j)\r\n    m=int(format(local[1])) # gets the month\r\n    d=int(format(local[2])) # gets the day\r\n    h=int(format(local[3])) # gets the hour\r\n    znaky=['A','a','Q','q','y','Y',',',' ','.','W','w','?','s','S','<','>',':','E','e','X','x','r','R','D','d','c','C','V','v','b','B','ô','ú','ä','ň','t','T','Z','z','f','F','h','H','n','N','M','m','-','_','u','U','i','I','J','j','k','K','P','p','o','O','l','L','!','(',')','ď','Ď','@','ľ','š','Š','č','Č','ť','Ť','ž','Ž','ó','ý','á','í','é','=','1','*','2','/','3','4','5','6','+','8','9','7','&','[',']','€','ř',';','0','~','%','#','ŕ','ĺ']\r\n    počet=len(znaky)//2\r\n    text=input_entry.get()\r\n    safe=''\r\n    n=d+h-m-r\r\n    for e in text:\r\n        poloha=0\r\n        posun=0\r\n        for i in znaky:\r\n            if i==e:\r\n                if poloha < počet:\r\n                    posun=poloha+n\r\n                    safe+=znaky[posun]\r\n                elif poloha > počet:\r\n                    posun=poloha-n\r\n                    safe+=znaky[posun]\r\n            else:\r\n                poloha+=1\r\n    safe+=str(n)\r\n    label.config(text=safe)\r\n\r\nbutton = Button(root, text=\"Encrypt\", command=sifruj)\r\nbutton.pack()\r\n\r\ndef desifruj():\r\n    znaky=['A','a','Q','q','y','Y',',',' ','.','W','w','?','s','S','<','>',':','E','e','X','x','r','R','D','d','c','C','V','v','b','B','ô','ú','ä','ň','t','T','Z','z','f','F','h','H','n','N','M','m','-','_','u','U','i','I','J','j','k','K','P','p','o','O','l','L','!','(',')','ď','Ď','@','ľ','š','Š','č','Č','ť','Ť','ž','Ž','ó','ý','á','í','é','=','1','*','2','/','3','4','5','6','+','8','9','7','&','[',']','€','ř',';','0','~','%','#','ŕ','ĺ']\r\n    počet=len(znaky)//2\r\n    text=input_entry.get()\r\n    n=int(text[-2:]) # the shift value that sifruj appended\r\n    text=text[:-2]\r\n    original=''\r\n    for e in text:\r\n        poloha=0\r\n        posun=0\r\n        for i in znaky:\r\n            if i==e:\r\n                if poloha < počet:\r\n                    posun=poloha+n\r\n                    original+=znaky[posun]\r\n                elif poloha > počet:\r\n                    posun=poloha-n\r\n                    original+=znaky[posun]\r\n            else:\r\n                poloha+=1\r\n\r\n    label.config(text=original)\r\n\r\nbutton1 = Button(root, text=\"Decrypt\", command=desifruj)\r\nbutton1.pack()\r\n\r\nroot.mainloop()\r\n","repo_name":"Albo112233/bakalarska_praca","sub_path":"šifry/moja_šifra.py","file_name":"moja_šifra.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"}
{"seq_id":"43284424172","text":"import random\nfrom tkinter import *\nfrom PIL import ImageTk, Image\nfrom tkinter import messagebox\n\ntry:\n    screen = Tk()\n\n    count = 0\n    X_score = 0\n    O_score = 0\n    screen.title(\"tic tac toe\")\n\n\n    def breaks():\n        global count\n        a = [b1, b2, b3, b4, b5, b6, b7, b8, b9]\n        if count < 9:\n            for i in a:\n                i['state'] = DISABLED\n        count = 9\n\n\n    def com_choice():\n        global count\n        a = [b1, b2, b3, b4, b5, b6, b7, b8, b9]\n        com = random.choice(a)\n        you_can = 0\n        if count != 9:\n            if com['state'] != DISABLED:\n                com['image'] = 
o_img\n com['text'] = \"O\"\n winners()\n com['state'] = DISABLED\n count += 1\n\n else:\n for i in a:\n if i['state'] == NORMAL:\n you_can += 1\n if you_can > 1:\n com_choice()\n else:\n pass\n\n\n winner = True\n\n\n def winners():\n global b1, b2, b3, b4, b5, b6, b7, b8, b9, winner, X_score, O_score, count\n if count <= 9:\n if (b1['text'] == \"X\" and b2['text'] == \"X\" and b3['text'] == \"X\") or (\n b1['text'] == \"X\" and b4['text'] == \"X\" and b7['text'] == \"X\") or (\n b1['text'] == \"X\" and b5['text'] == \"X\" and b9['text'] == \"X\") or (\n b7['text'] == \"X\" and b8['text'] == \"X\" and b9['text'] == \"X\") or (\n b3['text'] == \"X\" and b6['text'] == \"X\" and b9['text'] == \"X\") or (\n b3['text'] == \"X\" and b5['text'] == \"X\" and b7['text'] == \"X\") or (\n b4['text'] == \"X\" and b5['text'] == \"X\" and b6['text'] == \"X\") or (\n b2['text'] == \"X\" and b5['text'] == \"X\" and b8['text'] == \"X\"):\n messagebox.showinfo(\"showinfo\", \"you won\")\n X_score += 1\n buttons()\n\n count = 0\n\n winner = False\n # breaks()\n\n elif (b1['text'] == \"O\" and b2['text'] == \"O\" and b3['text'] == \"O\") or (\n b1['text'] == \"O\" and b4['text'] == \"O\" and b7['text'] == \"O\") or (\n b1['text'] == \"O\" and b5['text'] == \"O\" and b9['text'] == \"O\") or (\n b7['text'] == \"O\" and b8['text'] == \"O\" and b9['text'] == \"O\") or (\n b3['text'] == \"O\" and b6['text'] == \"O\" and b9['text'] == \"O\") or (\n b3['text'] == \"O\" and b5['text'] == \"O\" and b7['text'] == \"O\") or (\n b4['text'] == \"O\" and b5['text'] == \"O\" and b6['text'] == \"O\") or (\n b2['text'] == \"O\" and b5['text'] == \"O\" and b8['text'] == \"O\"):\n\n messagebox.showinfo(\"showinfo\", \"computer won\")\n O_score += 1\n buttons()\n\n count = 0\n\n winner = False\n # breaks()\n if count == 9:\n buttons()\n\n\n def click(b):\n global count\n if count == 9:\n pass\n if count != 9:\n b['image'] = x_img\n b['text'] = \"X\"\n winners()\n b['state'] = DISABLED\n\n com_choice()\n count += 1\n\n\n # images for X and O and also black image\n\n empty_img = ImageTk.PhotoImage(Image.open(\"images/empty.png\"))\n x_img = ImageTk.PhotoImage(Image.open(\"images/x.png\"))\n o_img = ImageTk.PhotoImage(Image.open(\"images/o.png\"))\n\n\n def buttons():\n global b1, b2, b3, b4, b5, b6, b7, b8, b9, X_score, O_score, count\n count = 0\n b1 = Button(screen, text=\"\", image=empty_img, command=lambda: click(b1), padx=50, pady=50, borderwidth=0)\n b2 = Button(screen, text=\"\", image=empty_img, command=lambda: click(b2), padx=50, pady=50, borderwidth=0)\n b3 = Button(screen, text=\"\", image=empty_img, command=lambda: click(b3), padx=50, pady=50, borderwidth=0)\n\n b4 = Button(screen, text=\"\", image=empty_img, command=lambda: click(b4), padx=50, pady=50, borderwidth=0)\n b5 = Button(screen, text=\"\", image=empty_img, command=lambda: click(b5), padx=50, pady=50, borderwidth=0)\n b6 = Button(screen, text=\"\", image=empty_img, command=lambda: click(b6), padx=50, pady=50, borderwidth=0)\n\n b7 = Button(screen, text=\"\", image=empty_img, command=lambda: click(b7), padx=50, pady=50, borderwidth=0)\n b8 = Button(screen, text=\"\", image=empty_img, command=lambda: click(b8), padx=50, pady=50, borderwidth=0)\n b9 = Button(screen, text=\"\", image=empty_img, command=lambda: click(b9), padx=50, pady=50, borderwidth=0)\n\n b10 = Label(screen, text=\"O score\", padx=40, pady=40)\n b10.grid(row=0, column=3)\n\n b10 = Label(screen, text=\"X score\", padx=40, pady=40)\n b10.grid(row=1, column=3)\n\n b11 = Button(screen, 
text=\"reset\", padx=40, pady=40, borderwidth=0, command=buttons)\n b11.grid(row=2, column=3)\n\n b12 = Button(screen, text=\"exit\", bg=\"red\", fg=\"white\", padx=40, pady=40, command=screen.quit)\n b12.grid(row=2, column=4)\n\n b13 = Label(screen, text=X_score, font=(\"Arial\", 25))\n b13.grid(row=1, column=4)\n\n b14 = Label(screen, text=O_score, font=(\"Arial\", 25))\n b14.grid(row=0, column=4)\n\n b1.grid(row=0, column=0)\n b2.grid(row=0, column=1)\n b3.grid(row=0, column=2)\n\n b4.grid(row=1, column=0)\n b5.grid(row=1, column=1)\n b6.grid(row=1, column=2)\n\n b7.grid(row=2, column=0)\n b8.grid(row=2, column=1)\n b9.grid(row=2, column=2)\n\n\n buttons()\n\n screen.mainloop()\n\nexcept RecursionError:\n print(\"nice\")\n","repo_name":"garuda0py/xo","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":5679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"4031048496","text":"import win32com.client as win32\nimport os\n\nxlApp = win32.gencache.EnsureDispatch('excel.application')\nwb = xlApp.Workbooks.Open(os.getcwd()+'''\\用户反馈本周详情0120-0126.xlsx''')\n\ntry:\n for i in range(1,6):\n sheet = wb.Worksheets(i)\n for r in range(2,1000):\n for c in range(1,26):\n if sheet.Cells(r,c).Value != None:\n sheet.Cells(r,c).Value = None\nfinally:\n wb.Save()\n wb.Close()\n\nprint('====== done ========')","repo_name":"machinegame/Tools","sub_path":"PythonTools/handle_feedback/clear_all_sheets_data.py","file_name":"clear_all_sheets_data.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"70665192973","text":"#! venv/bin/python python\nimport argparse\nimport json\nimport os\n\nfrom flask_fixtures import load_fixtures\n\nfrom feature_requester_app import app, models\n\nhelp_message = \"\"\"\nuse as - python manage.py [command]\n\n[command] can be replaced by one of the following:\n\nrunserver - runs the flask local server\n\ncreate_db - create the initial database tables.\n\ninit_db - populate the database with initial data. 
Make sure to run this\nafter create_db is run.\n\ndrop_db - drop the current database with data\n\"\"\"\n\ndescription_message = \"\"\"\nUtility to run various flask commands.\n\"\"\"\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description=description_message,\n formatter_class=argparse.RawTextHelpFormatter\n )\n parser.add_argument('command', help=help_message)\n args = parser.parse_args()\n\n if args.command == 'runserver':\n app.run(host='0.0.0.0')\n elif args.command == 'create_db':\n models.db.create_all()\n elif args.command == 'drop_db':\n models.db.drop_all()\n elif args.command == 'init_db':\n fixture_dir = os.path.join('feature_requester_app', 'fixtures')\n for fixture in os.listdir(fixture_dir):\n fixture_path = os.path.join(fixture_dir, fixture)\n with open(fixture_path, 'r') as infile:\n load_fixtures(\n models.db,\n json.loads(infile.read())\n )\n","repo_name":"BhargzShukla/feature-requester-flask","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"72155700491","text":"from flask import request, jsonify, Flask, make_response\nfrom kafka import KafkaProducer\nfrom time import sleep\n\napp = Flask(__name__)\n\nproducer = KafkaProducer(bootstrap_servers=['kafka:9092'], api_version=(0,10,1))\n\n@app.route('/', methods=['POST'])\ndef sendMessage():\n message = request.get_json()\n return make_response(message, 201)\n \t\napp.run()","repo_name":"alimertkoc/kafka_project","sub_path":"producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"33514758620","text":"import numpy as np\nimport scipy as sp\nimport pandas as pd\nimport os\nfrom cmath import rect\nfrom scipy.stats import chi2\n# from tqdm import tqdm\n\nnprect = np.vectorize(rect)\n\ndef line_params(r: float, x: float, c_total: float, a: float) -> complex:\n diag_value = 1/(r + x*1j) + c_total*0.5j if a == 0 else (1/(r + x*1j) + c_total*0.5j)/(a**2)\n out_diag_value = 1/(r + x*1j) if a == 0 else (1/(r + x*1j))/(a)\n return diag_value, out_diag_value\n\ndef get_line_values(parameter: str, bus_from: int, bus_to: int, network_values: dict) -> float:\n bus_from = int(bus_from - 1)\n bus_to = int(bus_to - 1)\n search_1 = (parameter, bus_from, bus_to)\n search_2 = (parameter, bus_to, bus_from)\n key = search_1 in network_values.keys()\n if key:\n return network_values.get(search_1)\n else:\n return network_values.get(search_2)\n \n\ndef derivada_p_Flow(y_bar: np.ndarray, network_values: dict, state_vector: np.ndarray, bus: tuple, n_bus: int) -> np.ndarray:\n state_amount = n_bus*2 -1\n pFlowVector = np.zeros(shape = (state_amount, ))\n for i in range(state_amount):\n if i < (n_bus-1):\n if i + 2 in bus:\n vi = state_vector[bus[0] + n_bus -2]\n vj = state_vector[bus[1] + n_bus -2]\n gij = get_line_values('g', bus[0], bus[1], network_values)\n bij = get_line_values('b', bus[0], bus[1], network_values)\n tetai = state_vector[bus[0] - 2] if bus[0] > 1 else 0\n tetaj = state_vector[bus[1] - 2] if bus[1] > 1 else 0\n pFlowVector[i] = vi*vj*(gij*np.sin(tetai-tetaj) - bij*np.cos(tetai-tetaj)) if i + 2 == bus[0] else -vi*vj*(gij*np.sin(tetai-tetaj) - bij*np.cos(tetai-tetaj))\n else:\n pFlowVector[i] = 0\n else:\n actual_bus = i - (n_bus - 2)\n if actual_bus in bus:\n vi = state_vector[bus[0] + n_bus -2]\n vj = state_vector[bus[1] + n_bus 
-2]\n gij = get_line_values('g', bus[0], bus[1], network_values)\n bij = get_line_values('b', bus[0], bus[1], network_values)\n gsi = get_line_values('gs', bus[0], bus[1], network_values)\n tetai = state_vector[bus[0] - 2] if bus[0] > 1 else 0\n tetaj = state_vector[bus[1] - 2] if bus[1] > 1 else 0\n pFlowVector[i] = -vj*(gij*np.cos(tetai-tetaj) + bij*np.sin(tetai-tetaj)) + 2*(gij + gsi)*vi if actual_bus == bus[0] else -vi*(gij*np.cos(tetai-tetaj) + bij*np.sin(tetai-tetaj))\n else:\n pFlowVector[i] = 0\n return pFlowVector\n\ndef derivada_q_Flow(y_bar: np.ndarray, network_values: dict, state_vector: np.ndarray, bus: tuple, n_bus: int) -> np.ndarray:\n state_amount = n_bus*2 -1\n qFlowVector = np.zeros(shape = (state_amount, ))\n for i in range(state_amount):\n if i < (n_bus-1):\n if i + 2 in bus:\n vi = state_vector[bus[0] + n_bus -2]\n vj = state_vector[bus[1] + n_bus -2]\n gij = get_line_values('g', bus[0], bus[1], network_values)\n bij = get_line_values('b', bus[0], bus[1], network_values)\n tetai = state_vector[bus[0] - 2] if bus[0] > 1 else 0\n tetaj = state_vector[bus[1] - 2] if bus[1] > 1 else 0\n qFlowVector[i] = -vi*vj*(gij*np.cos(tetai-tetaj) + bij*np.sin(tetai-tetaj)) if i + 2 == bus[0] else vi*vj*(gij*np.cos(tetai-tetaj) + bij*np.sin(tetai-tetaj))\n else:\n qFlowVector[i] = 0\n else:\n actual_bus = i - (n_bus - 2)\n if actual_bus in bus:\n vi = state_vector[bus[0] + n_bus -2]\n vj = state_vector[bus[1] + n_bus -2]\n gij = get_line_values('g', bus[0], bus[1], network_values)\n bij = get_line_values('b', bus[0], bus[1], network_values)\n bsi = get_line_values('bs', bus[0], bus[1], network_values)\n tetai = state_vector[bus[0] - 2] if bus[0] > 1 else 0\n tetaj = state_vector[bus[1] - 2] if bus[1] > 1 else 0\n qFlowVector[i] = -vj*(gij*np.sin(tetai-tetaj) - bij*np.cos(tetai-tetaj)) - 2*(bij + bsi)*vi if actual_bus == bus[0] else -vi*(gij*np.sin(tetai-tetaj) - bij*np.cos(tetai-tetaj))\n else:\n qFlowVector[i] = 0\n return qFlowVector\n\ndef derivada_p_Inj(y_bar: np.ndarray, network_values: dict, state_vector: np.ndarray, bus: int, n_bus: int) -> np.ndarray:\n state_amount = n_bus*2 -1\n pInjVector = np.zeros(shape = (state_amount, ))\n for i in range(state_amount):\n if i < (n_bus-1):\n if i + 2 == bus:\n vi = state_vector[bus + n_bus -2]\n tetai = state_vector[bus - 2] if bus > 1 else 0\n Bii = y_bar[bus -1, bus - 1].imag\n bus_looking = range(n_bus)\n aux_p = 0\n for j in bus_looking:\n vj = state_vector[j + n_bus - 1]\n Gij = y_bar[i+1, j].real\n Bij = y_bar[i+1, j].imag\n tetaj = state_vector[j - 1] if j > 0 else 0\n tetaij = tetai - tetaj\n aux_p += vi*vj*(Bij*np.cos(tetaij) - Gij*np.sin(tetaij))\n pInjVector[i] = aux_p - vi*vi*Bii\n else:\n vi = state_vector[bus + n_bus -2]\n vj = state_vector[i + n_bus]\n tetai = state_vector[bus - 2] if bus > 1 else 0\n Gij = y_bar[bus - 1, i+1].real\n Bij = y_bar[bus - 1, i+1].imag\n tetaj = state_vector[i]\n tetaij = tetai - tetaj\n pInjVector[i] = vi*vj*(Gij*np.sin(tetaij) - Bij*np.cos(tetaij))\n else:\n actual_bus = i - (n_bus - 2)\n if actual_bus == bus:\n vi = state_vector[bus + n_bus -2]\n tetai = state_vector[bus - 2] if bus > 1 else 0\n Gii = y_bar[bus -1, bus - 1].real\n bus_looking = range(n_bus)\n aux_p = 0\n for j in bus_looking:\n vj = state_vector[j + n_bus - 1]\n Gij = y_bar[actual_bus-1, j].real\n Bij = y_bar[actual_bus-1, j].imag\n tetaj = state_vector[j - 1] if j > 0 else 0\n tetaij = tetai - tetaj\n aux_p += vj*(Bij*np.sin(tetaij) + Gij*np.cos(tetaij))\n pInjVector[i] = aux_p + vi*Gii\n else:\n vi = state_vector[bus + 
n_bus -2]\n vj = state_vector[i]\n tetai = state_vector[bus - 2] if bus > 1 else 0\n Gij = y_bar[bus - 1, actual_bus-1].real\n Bij = y_bar[bus - 1, actual_bus-1].imag\n tetaj = state_vector[actual_bus-2] if actual_bus > 1 else 0\n tetaij = tetai - tetaj\n pInjVector[i] = vi*(Gij*np.cos(tetaij) + Bij*np.sin(tetaij))\n return pInjVector\n\ndef derivada_q_Inj(y_bar: np.ndarray, network_values: dict, state_vector: np.ndarray, bus: int, n_bus: int) -> np.ndarray:\n state_amount = n_bus*2 -1\n qInjVector = np.zeros(shape = (state_amount, ))\n for i in range(state_amount):\n if i < (n_bus-1):\n if i + 2 == bus:\n vi = state_vector[bus + n_bus -2]\n tetai = state_vector[bus - 2] if bus > 1 else 0\n Gii = y_bar[bus -1, bus - 1].real\n bus_looking = range(n_bus)\n aux_p = 0\n for j in bus_looking:\n vj = state_vector[j + n_bus - 1]\n Gij = y_bar[i+1, j].real\n Bij = y_bar[i+1, j].imag\n tetaj = state_vector[j - 1] if j > 0 else 0\n tetaij = tetai - tetaj\n aux_p += vi*vj*(Bij*np.sin(tetaij) + Gij*np.cos(tetaij))\n qInjVector[i] = aux_p - vi*vi*Gii\n else:\n vi = state_vector[bus + n_bus -2]\n vj = state_vector[i + n_bus]\n tetai = state_vector[bus - 2] if bus > 1 else 0\n Gij = y_bar[bus - 1, i+1].real\n Bij = y_bar[bus - 1, i+1].imag\n tetaj = state_vector[i]\n tetaij = tetai - tetaj\n qInjVector[i] = -vi*vj*(Gij*np.cos(tetaij) + Bij*np.sin(tetaij))\n else:\n actual_bus = i - (n_bus - 2)\n if actual_bus == bus:\n vi = state_vector[bus + n_bus -2]\n tetai = state_vector[bus - 2] if bus > 1 else 0\n Bii = y_bar[bus -1, bus - 1].imag\n bus_looking = range(n_bus)\n aux_p = 0\n for j in bus_looking:\n vj = state_vector[j + n_bus - 1]\n Gij = y_bar[actual_bus-1, j].real\n Bij = y_bar[actual_bus-1, j].imag\n tetaj = state_vector[j - 1] if j > 0 else 0\n tetaij = tetai - tetaj\n aux_p += vj*(Gij*np.sin(tetaij) - Bij*np.cos(tetaij))\n qInjVector[i] = aux_p - vi*Bii\n else:\n vi = state_vector[bus + n_bus -2]\n vj = state_vector[i]\n tetai = state_vector[bus - 2] if bus > 1 else 0\n Gij = y_bar[bus - 1, actual_bus-1].real\n Bij = y_bar[bus - 1, actual_bus-1].imag\n tetaj = state_vector[actual_bus-2] if actual_bus > 1 else 0\n tetaij = tetai - tetaj\n qInjVector[i] = vi*(Gij*np.sin(tetaij) - Bij*np.cos(tetaij))\n return qInjVector\n\ndef derivada_voltage(y_bar: np.ndarray, network_values: dict, state_vector: np.ndarray, bus: int, n_bus: int) -> np.ndarray:\n state_amount = n_bus*2 -1\n vVector = np.zeros(shape = (state_amount, ))\n for i in range(state_amount):\n if i < (n_bus-1):\n vVector[i] = 0\n else:\n actual_bus = i - (n_bus - 2)\n if actual_bus == bus:\n vVector[i] = 1\n else:\n vVector[i] = 0\n return vVector\n\ndef derivada_current_Flow(y_bar: np.ndarray, network_values: dict, state_vector: np.ndarray, bus: tuple, n_bus: int) -> np.ndarray:\n return\n\ndef p_Flow(y_bar: np.ndarray, network_values: dict, state_vector: np.ndarray, bus: tuple, n_bus: int) -> np.ndarray:\n gij = get_line_values('g', bus[0], bus[1], network_values)\n bij = get_line_values('b', bus[0], bus[1], network_values)\n gsi = get_line_values('gs', bus[0], bus[1], network_values)\n tetai = state_vector[bus[0] - 2] if bus[0] > 1 else 0\n tetaj = state_vector[bus[1] - 2] if bus[1] > 1 else 0\n tetaij = tetai - tetaj\n vi = state_vector[bus[0] + (n_bus - 2)]\n vj = state_vector[bus[1] + (n_bus - 2)]\n pFlow = vi*vi*(gsi + gij) - vi*vj*(gij*np.cos(tetaij) + bij*np.sin(tetaij))\n return pFlow\n\ndef q_Flow(y_bar: np.ndarray, network_values: dict, state_vector: np.ndarray, bus: tuple, n_bus: int) -> np.ndarray:\n gij = 
get_line_values('g', bus[0], bus[1], network_values)\n bij = get_line_values('b', bus[0], bus[1], network_values)\n bsi = get_line_values('bs', bus[0], bus[1], network_values)\n tetai = state_vector[bus[0] - 2] if bus[0] > 1 else 0\n tetaj = state_vector[bus[1] - 2] if bus[1] > 1 else 0\n tetaij = tetai - tetaj\n vi = state_vector[bus[0] + (n_bus - 2)]\n vj = state_vector[bus[1] + (n_bus - 2)]\n qFlow = -vi*vi*(bsi + bij) - vi*vj*(gij*np.sin(tetaij) - bij*np.cos(tetaij))\n return qFlow\n\ndef p_Inj(y_bar: np.ndarray, network_values: dict, state_vector: np.ndarray, bus: int, n_bus: int) -> np.ndarray:\n vi = state_vector[bus + (n_bus - 2)]\n tetai = state_vector[bus - 2] if bus > 1 else 0\n pInj = 0\n for j in range(n_bus):\n tetaj = state_vector[j - 1] if j > 0 else 0\n vj = state_vector[j + (n_bus - 1)]\n Gij = y_bar[bus-1, j].real\n Bij = y_bar[bus-1, j].imag\n tetaij = tetai - tetaj\n pInj += vi*vj*(Gij*np.cos(tetaij) + Bij*np.sin(tetaij))\n return pInj\n\ndef q_Inj(y_bar: np.ndarray, network_values: dict, state_vector: np.ndarray, bus: int, n_bus: int) -> np.ndarray:\n vi = state_vector[bus + (n_bus - 2)]\n tetai = state_vector[bus - 2] if bus > 1 else 0\n qInj = 0\n for j in range(n_bus):\n tetaj = state_vector[j - 1] if j > 0 else 0\n vj = state_vector[j + (n_bus - 1)]\n Gij = y_bar[bus-1, j].real\n Bij = y_bar[bus-1, j].imag\n tetaij = tetai - tetaj\n qInj += vi*vj*(Gij*np.sin(tetaij) - Bij*np.cos(tetaij))\n return qInj\n\ndef voltage(y_bar: np.ndarray, network_values: dict, state_vector: np.ndarray, bus: int, n_bus: int) -> np.ndarray:\n return state_vector[bus + (n_bus - 2)]\n\ndef measurement_Function(order_meansured: list, meansured_network: dict, y_bar: np.ndarray, network_values: dict, state_vetctor: np.ndarray = None) -> np.ndarray:\n n_bus = y_bar.shape[0]\n if state_vetctor is None:\n states = [0]*(n_bus-1)\n states.extend([1]*n_bus)\n state_vetctor = np.array(states)\n line_amount = len(order_meansured)\n z_array = np.zeros(shape = (line_amount, ))\n meansured_estimated = np.zeros(shape = (line_amount, ))\n func_dict = {'P_inj': p_Inj,'P_flow': p_Flow, 'Q_inj': q_Inj, 'Q_flow': q_Flow, 'V': voltage, 'I_flow': derivada_current_Flow}\n for i, meansured in enumerate(order_meansured):\n meansured_type = list(meansured.keys())[0]\n bus = meansured.get(meansured_type)\n line_h = func_dict[meansured_type](y_bar, network_values, state_vetctor, bus, n_bus)\n meansured_estimated[i] = line_h\n z_array[i] = meansured_network[meansured_type[0]][bus][0] - line_h\n return z_array, meansured_estimated\n\ndef meansurement_jacobian(order_meansured: list, y_bar: np.ndarray, network_values: dict, state_vetctor: np.ndarray = None) -> np.ndarray:\n n_bus = y_bar.shape[0]\n if state_vetctor is None:\n states = [0]*(n_bus-1)\n states.extend([1]*n_bus)\n state_vetctor = np.array(states)\n line_amount = len(order_meansured)\n col_amount = len(state_vetctor)\n h_matrix = np.zeros(shape = (line_amount, col_amount))\n func_dict = {'P_inj': derivada_p_Inj,'P_flow': derivada_p_Flow, 'Q_inj': derivada_q_Inj, 'Q_flow': derivada_q_Flow, 'V': derivada_voltage, 'I_flow': derivada_current_Flow}\n for i, meansured in enumerate(order_meansured):\n meansured_type = list(meansured.keys())[0]\n bus = meansured.get(meansured_type)\n line_h = func_dict[meansured_type](y_bar, network_values, state_vetctor, bus, n_bus)\n h_matrix[i,:] = line_h\n return h_matrix\n\ndef get_G_matrix(H: np.ndarray, R: np.ndarray) -> np.ndarray:\n R_inv = np.linalg.inv(R)\n H_T = np.transpose(H.copy())\n h_r_inv = H_T@R_inv\n g = 
h_r_inv@H\n return g\n\ndef read_meansured(filepath: str, filename: str, data: pd.core.frame.DataFrame = None):\n if data is None:\n path_file = os.path.join(filepath, filename)\n meansured = pd.read_excel(path_file)\n else:\n meansured = data\n data = {}\n R = np.identity(meansured.shape[0])\n order_meansured = []\n for i, line in enumerate(meansured.iterrows()):\n meansured_line = line[1]\n meansured_type = meansured_line['Tipo']\n bus_from_to = (meansured_line['De'], meansured_line['Para']) if meansured_line['Para'] != '-' else meansured_line['De']\n if meansured_type == \"P\" or meansured_type == \"Q\" or meansured_type == \"I\":\n meansured_type_specific = f'{meansured_type}_flow' if type(bus_from_to) == tuple else f'{meansured_type}_inj'\n else:\n meansured_type_specific = meansured_type\n order_meansured.append({meansured_type_specific: bus_from_to})\n R[i,i] = meansured_line['Desvio Padrão']**2\n med_caracteristic = (meansured_line['Valor'], meansured_line['Desvio Padrão'])\n data_med = {bus_from_to: med_caracteristic}\n if meansured_type not in list(data.keys()):\n data[meansured_type] = data_med\n else:\n data[meansured_type].update(data_med)\n return data, R, order_meansured\n\n\ndef mount_y_bar(filepath: str, filename: str, data: pd.core.frame.DataFrame = None):\n if data is None:\n path_file = os.path.join(filepath, filename)\n line_data = pd.read_excel(path_file)\n else:\n line_data = data\n total_lines = set(line_data['De'].to_list() + line_data['Para'].to_list())\n n = len(total_lines)\n y_bar_matriz = np.zeros((n, n), dtype=complex)\n connective_values = {}\n for lines in line_data.iterrows():\n data_lines = lines[1]\n bus_from = int(data_lines['De'] - 1)\n bus_to = int(data_lines['Para'] - 1)\n coord = (bus_from, bus_to)\n bshunt = float(1/data_lines['C']) if data_lines['C'] > 0 else 0\n diag_value, out_diag_value = line_params(data_lines['R'], data_lines['X'], bshunt, data_lines['Tap'])\n _, line_admitance = line_params(data_lines['R'], data_lines['X'], 0, 0)\n line_admitance_shunt = bshunt*0.5j if bshunt > 0 else 0\n connect = {('g', *coord): line_admitance.real, \n ('b', *coord): line_admitance.imag,\n ('gs', *coord): line_admitance_shunt.real if type(line_admitance_shunt) == complex else 0, \n ('bs', *coord): line_admitance_shunt.imag if type(line_admitance_shunt) == complex else 0}\n if len(connective_values) == 0:\n connective_values = connect.copy()\n else:\n connective_values.update(connect)\n y_bar_matriz[bus_from, bus_from] += diag_value\n y_bar_matriz[bus_to, bus_to] += diag_value if data_lines['Tap'] == 0 else diag_value*(data_lines['Tap']**2)\n y_bar_matriz[bus_from, bus_to] -= out_diag_value\n y_bar_matriz[bus_to, bus_from] -= out_diag_value\n return y_bar_matriz, connective_values\n\ndef add_susceptance_shunt(filepath, filename, ybar) -> np.ndarray:\n path_file = os.path.join(filepath, filename)\n susceptance_data = pd.read_excel(path_file)\n for lines in susceptance_data.iterrows():\n shunt_data = lines[1]\n bus = int(shunt_data['Bus'] - 1)\n ybar[bus, bus] += float(shunt_data['Shunt'])*1j\n return ybar\n\ndef pot_inj(voltage: np.ndarray, angle: np.ndarray, y_bar: np.ndarray) -> np.ndarray:\n complex_volt = nprect(voltage, np.deg2rad(angle))\n current = np.dot(y_bar, complex_volt)\n return complex_volt.T*np.conjugate(current)\n\ndef monte_carlo_state_estimation():\n y_bar_matriz, network_values = mount_y_bar('.', 'Dados_linha.xlsx')\n meansured, R, order_meansured = read_meansured('.','Meansured_data.xlsx')\n tol = 1e-4\n error = tol+1\n nits = 0\n results 
= []\n for _ in range(10000):\n meansured_smc = {}\n for key, val in meansured.items():\n for second_key, params in val.items():\n sampling_data = {second_key: (np.random.normal(*params), params[1])}\n if key not in meansured_smc.keys():\n meansured_smc[key] = sampling_data\n else:\n meansured_smc[key].update(sampling_data)\n error = tol+1\n nits = 0\n while error > tol:\n nits += 1\n if nits == 1:\n n_bus = y_bar_matriz.shape[0]\n states = [0]*(n_bus-1)\n states.extend([1]*n_bus)\n state_array = np.array(states, dtype=float)\n zArray, _ = measurement_Function(order_meansured, meansured_smc, y_bar_matriz, network_values, state_array)\n h_matrix = meansurement_jacobian(order_meansured, y_bar_matriz, network_values, state_array)\n g_matrix = get_G_matrix(h_matrix, R)\n H_t = np.transpose(h_matrix)\n R_inv = np.linalg.inv(R)\n h_r = H_t@R_inv\n t = h_r@zArray\n x_delta = np.linalg.solve(g_matrix, t)\n state_array += x_delta\n error = np.max(np.abs(x_delta))\n _, final_med = measurement_Function(order_meansured, meansured_smc, y_bar_matriz, network_values, state_array)\n results.append(final_med)\n pd.DataFrame(results).to_csv('resultado_smc_all_error.csv', decimal=',', sep = ';')\n\ndef linear_state_estimation(order_meansured: list, y_bar_matriz: np.ndarray):\n med_data = pd.DataFrame(columns = ['Meansured_Type', 'Location'])\n line_amount = np.sum([1 if list(x.keys())[0][0] == 'P' else 0 for x in order_meansured ])\n col_amount = y_bar_matriz.shape[0] - 1\n H_linear = np.zeros((line_amount, col_amount))\n l = 0\n for meansured in order_meansured:\n meansured_type = list(meansured.keys())[0]\n meansured_loc = list(meansured.values())[0]\n if meansured_type[0] != 'P':\n pass\n else:\n med_data.loc[len(med_data)] = [meansured_type, meansured_loc]\n if meansured_type.split('_')[-1] == 'flow':\n if meansured_loc[0] - 2 >= 0:\n H_linear[l][meansured_loc[0] - 2] = 1\n if meansured_loc[1] - 2 >= 0:\n H_linear[l][meansured_loc[1] - 2] = -1\n else:\n aux = y_bar_matriz[meansured_loc - 1][1:]\n conected = aux == 0\n H_linear[l][~conected] = -1\n if meansured_loc - 2 >= 0:\n H_linear[l][meansured_loc - 2] = np.sum(conected)\n l += 1\n R = np.identity(line_amount)\n G = H_linear.T@H_linear\n try:\n detG = np.linalg.det(G)\n G_1 = np.linalg.inv(G)\n E = R - (H_linear@(G_1))@H_linear.T\n except:\n detG = 0\n G_1 = np.zeros(G.shape)\n E = np.zeros(R.shape)\n is_observable=False if detG <= 1e-11 else True\n return H_linear, E, med_data,is_observable\n\ndef identify_critical(E: np.ndarray, med_data: pd.core.frame.DataFrame):\n med_data['Criticality'] = np.nan\n amount_meansured = E.shape[0]\n residual = np.sum(E, axis = 1)\n standart_dev = np.sqrt(np.diag(E))\n aux = standart_dev[:,None]*standart_dev\n aux = np.where(aux == 0, 0.00000001, aux)\n gamma = np.divide(np.abs(E), aux)\n normalized_residual = np.abs(np.where(standart_dev == 0, 0, residual/standart_dev))\n normalized_residual__ = np.where(normalized_residual == 0, 0.000001, normalized_residual)\n rho = normalized_residual__[:,None]/normalized_residual__\n critical_meansured = (standart_dev <= 1e-6)*(residual <= 1e-6)\n number_cmeans = list(np.arange(amount_meansured)[critical_meansured])\n med_data.loc[med_data.index.isin(number_cmeans), 'Criticality'] = 'Medida Crítica'\n critical_sets = []\n csets = []\n amount_csets = 0\n for i in range(0, amount_meansured):\n for j in range(i+1, amount_meansured):\n if (rho[i][j] >= 0.98) and (gamma[i][j] >= 0.98) and (i not in number_cmeans) and (j not in number_cmeans):\n csets.append(i)\n csets.append(j)\n if 
any([set(csets).issubset(x) for x in critical_sets]):\n csets = []\n if len(csets) > 0:\n amount_csets += 1\n critical_sets += [set(csets)]\n med_data.loc[med_data.index.isin(csets), 'Criticality'] = f'Conj.Crítico_{amount_csets}'\n csets = []\n return med_data\n\ndef observable_system(meansured_data_instante = None, line_data = None) -> tuple:\n y_bar_matriz, network_values = mount_y_bar('.', '.', data = line_data)\n meansured, R, order_meansured = read_meansured('.', '.', data= meansured_data_instante)\n _, E, med_data,is_observable = linear_state_estimation(order_meansured, y_bar_matriz)\n if is_observable:\n criticality_data = identify_critical(E, med_data)\n mapping = {criticality_data.columns[0]:'Tipo', criticality_data.columns[1]: 'Localização', criticality_data.columns[2]:'Criticalidades'}\n criticality_data = criticality_data.rename(columns=mapping)\n return is_observable, criticality_data#.rename(columns= {\"Meansured_Type\": \"Tipo\", \"Location\": \"Localização\", \"Criticality\": \"Criticalidades\"})\n else:\n return is_observable, None\n\n\ndef state_estimation(path = '.', line_params = 'Dados_linha_XIV_barras.xlsx', meansured_file = 'Meansured_data_XIV_bus.xlsx', meansured_data_instante = None, line_data = None):\n y_bar_matriz, network_values = mount_y_bar(path, line_params, data = line_data)\n meansured, R, order_meansured = read_meansured(path, meansured_file, data= meansured_data_instante)\n _, E, med_data,is_observable = linear_state_estimation(order_meansured, y_bar_matriz)\n criticality_data = identify_critical(E, med_data)\n tol = 1e-6\n error = tol+1\n nits = 0\n J_list = []\n while error > tol and nits<50:\n nits += 1\n if nits == 1:\n n_bus = y_bar_matriz.shape[0]\n states = [0]*(n_bus-1)\n states.extend([1]*n_bus)\n state_array = np.array(states, dtype=float)\n zArray, _ = measurement_Function(order_meansured, meansured, y_bar_matriz, network_values, state_array)\n h_matrix = meansurement_jacobian(order_meansured, y_bar_matriz, network_values, state_array)\n g_matrix = get_G_matrix(h_matrix, R)\n H_t = np.transpose(h_matrix)\n R_inv = np.linalg.inv(R)\n zArray = np.where(np.abs(zArray) < 1e-6, 0, zArray)\n J = np.sum(np.dot(zArray**2, R_inv))\n J_list.append(J)\n h_r = H_t@R_inv\n t = h_r@zArray\n x_delta = np.linalg.solve(g_matrix, t)\n state_array += x_delta\n error = np.max(np.abs(x_delta))\n # print('-----------------------------------')\n # print(f'Quantidade de Iterações: {nits} para uma tolerância de : {tol: .2e}')\n # print('-----------------------------------')\n State_dataframe = pd.DataFrame(state_array[n_bus-1:], columns = ['Mag. de Tensão'], index = list(range(1,n_bus+1)))\n State_dataframe['Ang.(°)'] = np.degrees([0]+state_array[:n_bus-1].tolist())\n Error_SE, zArray = measurement_Function(order_meansured, meansured, y_bar_matriz, network_values, state_array)\n data_SE = pd.DataFrame([med.keys() for med in order_meansured], columns = ['Tipos'])\n data_SE['Localização'] = [list(loc.values())[0] for loc in order_meansured]\n data_SE['Valor Medido'] = [meansured[list(x.keys())[0].split('_')[0]][list(x.values())[0]][0] for x in order_meansured]\n data_SE['Valor Estimado'] = zArray\n data_SE['Desvio'] = np.where(np.abs(Error_SE) < 1e-6, 0, Error_SE)\n Sigma = np.linalg.inv(g_matrix)\n Erii = np.abs(R - h_matrix@Sigma@H_t)\n Erii = np.sqrt(np.diag(Erii))\n data_SE['Res. 
Normalizado'] = np.where(Erii > 10e-10, np.abs(data_SE['Desvio']/Erii), np.inf)\n    # data_SE['Error_Normalize'] = np.where(Erii > 0, data_SE['Error']/Erii, 0)\n    # print('-----------------------------------')\n    # print('Final value of the objective function (J)')\n    # J = Error_SE@R_inv\n    # J = J@Error_SE\n    # print(J)\n    # print('-----------------------------------')\n    ddof = data_SE.shape[0] - 2*(State_dataframe.shape[0]) + 1\n    J_critical = chi2.ppf(1-0.05, ddof)\n    mapping = {criticality_data.columns[0]:'Tipo', criticality_data.columns[1]: 'Localização', criticality_data.columns[2]:'Criticalidades'}\n    criticality_data = criticality_data.rename(columns=mapping)\n\n    data_SE['Tipos'] = data_SE['Tipos'].map({\"P_flow\": \"Fluxo Pot. Ativ.\", \"Q_flow\": \"Fluxo Pot. Reativ.\", \"P_inj\": \"Injeção Pot. Ativ.\", \"Q_inj\": \"Injeção Pot. Reativ.\",'V':'Módulo da Tensão'})\n    criticality_data['Tipo'] = criticality_data['Tipo'].map({\"P_flow\": \"Fluxo Pot. Ativ.\", \"Q_flow\": \"Fluxo Pot. Reativ.\", \"P_inj\": \"Injeção Pot. Ativ.\", \"Q_inj\": \"Injeção Pot. Reativ.\",'V':'Módulo da Tensão'})\n    critical_measured = criticality_data[criticality_data['Criticalidades'] == 'Medida Crítica']\n    data_SE.loc[(data_SE['Tipos'].isin(critical_measured['Tipo'])) & (data_SE['Localização'].isin(critical_measured['Localização'])), 'Res. Normalizado'] = np.inf\n    return y_bar_matriz, criticality_data, J_list, J_critical, State_dataframe, data_SE, is_observable\n\n\nif __name__ == \"__main__\":\n    # state_estimation returns 7 values, so is_observable must be unpacked as well\n    y_bar_matriz, criticality_data, J_list, J_critical, State_dataframe, data_SE, is_observable = state_estimation(line_params = 'Dados_linha_XIV_barras.xlsx', meansured_file = 'Meansured_data_XIV_bus.xlsx')\n    print(data_SE)\n    #Both the State_dataframe and the data_SE can be saved this way\n    filename = 'Estado_Estimado.csv'\n    State_dataframe.to_csv(filename, sep = ';', decimal = ',')\n\n\n    #Usage for multiple samples\n    # medidas_dia = pd.read_excel('Meansured_data_XIV_bus_multi_instant.xlsx')\n    # cols = medidas_dia.columns[:-1]\n    # residual_df = []\n\n    # for inst in tqdm(medidas_dia['Instante'].unique()):\n    #     df = medidas_dia[medidas_dia['Instante'] == inst][cols]\n    #     _, _, _, _, _, data_SE, _ = state_estimation(line_params='Dados_linha_XIV_barras_corrompidos_not_all.xlsx',meansured_data_instante=df)\n    #     data_SE['Instante'] = inst\n    #     residual_df.append(data_SE)\n    # residual_df = pd.concat(residual_df, axis = 0)\n    # residual_df.to_excel('dados_residuos_dia_corromp_aleatorio_not_all.xlsx')\n\n\n\n    \n\n\n\n\n","repo_name":"AyresNishio/State-Estimation-Educational-Tool","sub_path":"SisPotFunctions/PSSS.py","file_name":"PSSS.py","file_ext":"py","file_size_in_byte":29095,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"11035284043","text":"def affiche_menu(): #the def keyword defines a function\n    print(\"Menu : \")\n    print(\"*Action 1 : \")\n    print(\"*Action 2 : \")\n\naffiche_menu()\n\naffiche_menu()\n\naffiche_menu() #call to the function\n\ndef dire(texte): #argument named precisely for clarity\n    print('# ' +texte)\n\ndire(\"Bonjour\") #drives the dire function through its parameter\ndire(\"Au revoir\") #can print different things depending on the argument\ndire(\"A demain\")\n\ndef addition(a, b):\n    return a + b #addition via return and +\n\nsomme = addition(4, 1)\nprint(somme)\n\ndef saluer(nom = 'visiteur'): #default value\n    print(\"Bonjour \" + 
nom)\n\nsaluer('Clem')\nsaluer()\n","repo_name":"HAjulien/demo-python","sub_path":"test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"32206541588","text":"#!/usr/bin/env python3\n\n#import protobuf\nimport pathlib\nimport random\nimport os\nimport base64\nimport time\nimport concurrent.futures,threading\nimport dbm\n\ndb = dbm.open(\"/data/dataout/features\",flag='n')\ndblock = threading.Lock()\n\ndef writeItem(key,payload):\n    #with dblock:\n\n    db[key]=payload\n    return\n\ndef generate(start,end):\n    try:\n        print(\"Starting batch\",start,end)\n        items=[]\n        for i in range(start,end):\n            hexnumber=\"%06x\"%(i)\n            firstdir=hexnumber[0:2]\n            seconddir=hexnumber[2:4]\n            pathlib.Path(\"data-dbm/%s/%s\"%(firstdir,seconddir)).mkdir(parents=True, exist_ok=True)\n            accountid = '%040x' % random.randrange(16**40)\n            plasticid = '%010i' % random.randrange(10**10)\n            with open(\"data-dbm/%s/%s/%s\"%(firstdir,seconddir,hexnumber),\"w\") as fh:\n                fh.write(accountid+\",\"+plasticid)\n            #print(i,hexnumber,firstdir,seconddir,accountid,plasticid)\n            length=random.randint(200,400000)\n            #payload=base64.b64encode(os.urandom(length))\n            payload=os.urandom(length)\n            key=\"%s#%s\"%(accountid,plasticid)\n            writeItem(key,payload)\n        print(\"Ending batch\",start,end)\n    except Exception as err:\n        print(\"Error:\",err)\n        raise\n\nbatch=1000\nwith concurrent.futures.ThreadPoolExecutor() as executor:\n    i=0\n    while i<1000000:\n        future=executor.submit(generate,i,i+batch)\n        i+=batch\n\ndb.close()\n","repo_name":"barberd/P49-test","sub_path":"generator-dbm.py","file_name":"generator-dbm.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"25532105521","text":"\nfrom setuptools import setup, find_packages\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\nsetup(\n    name='shapleychains',\n    version='0.0.1',\n    description='',\n    url= 'https://github.com/cwayad/shapleychains.git',\n    long_description=long_description,\n    long_description_content_type= \"text/markdown\",\n    author='Célia Wafa AYAD',\n    author_email='celiane.ayad@gmail.com',\n    packages=find_packages('src'), \n    package_dir={'': 'src'},\n    install_requires=[\n        'pandas',\n        'numpy',\n        'scikit-learn',\n        'shap',\n        'matplotlib',\n    ],\n    extras_require = {\n        \"dev\": [\n            \"pytest>=3.7\"\n        ]\n    }\n)","repo_name":"cwayad/shapleychains","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"74728896650","text":"import pandas as pd \r\nimport matplotlib.pyplot as plt \r\ndata_set = pd.read_csv(r'C:\\Users\\agnih\\Desktop\\ML\\tvmarketing.csv')\r\nprint(data_set.columns)\r\n\r\n#Checking for null values Quality Check-1\r\nprint(data_set.isnull().sum())\r\n\r\n#Checking for duplicate values Quality Check-2\r\nprint(data_set.duplicated().sum())\r\n\r\n#Checking for Outliers in the Columns Quality Check-3\r\nplt.boxplot(data_set['Sales'])\r\nplt.show()\r\n\r\n","repo_name":"YajuluRenduchintala/Machine-Learning-Models","sub_path":"Quality Check.py","file_name":"Quality Check.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"12853417195","text":"#!/usr/bin/python\n\n# Author: Andy Ng \n# Webcoder.io All Rights 
Reserved\n# Please do not delete the credit above!\n\n# Given an array of n integers a_0,a_1,...,a_{n-1} positive\n# and negative, such as [-1, 3, 2, -7, 4, 2, -3, 3, -1], you\n# want to find the largest sum of contiguous integers; in\n# this case it would be 4 + 2 - 3 + 3 = 6.\n\n# Note: the empty set has sum 0.\n###########################################################\n\ndef partialsums(a):\n    ## computes all of the partial sums in linear time\n    ## ans[0] = a[0], ans[1] = a[0]+a[1], ans[2] = a[0]+a[1]+a[2], etc\n    partial = 0\n    ans = []\n    for elt in a:\n        partial += elt\n        ans.append(partial)\n    return ans\n\ndef maxsums_dc(a):\n    ## divide and conquer algorithm to return\n    ## the largest sum in time O(n log n)\n\n    # Base case step\n    if len(a) == 0:\n        return 0\n    elif len(a) == 1:\n        return max(a[0], 0)\n\n    # Recursive case step\n    # '//' is a floor division\n    # http://docs.python.org/release/2.2.3/whatsnew/node7.html\n    middle = len(a)//2\n\n    lsum = 0\n    lmax = 0\n    # xrange([start], stop[, step])\n    # This function is very similar to range()\n    # but returns an “xrange object”\n    for left in xrange(1, middle + 1):\n        lsum += a[middle - left]\n        lmax = max(lmax, lsum)\n\n    rsum = 0\n    rmax = 0\n    for right in xrange(middle, len(a)):\n        rsum += a[right]\n        rmax = max(rmax, rsum)\n\n    return max(lmax + rmax, maxsums_dc(a[:middle]), maxsums_dc(a[middle:]))\n    #O(n log n)","repo_name":"webcoderio/computer-science-projects","sub_path":"Algorithm Design (Python)/AD_hw6.1_MaxSums2/src/maxsums2.py","file_name":"maxsums2.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"10553141707","text":"def fred(q):\n    return q**2 - 3\n\ndef sumFredWhile(n):\n    summ = 0\n    n -= 1\n    while n >= 0:\n        summ += fred(n)\n        n -= 1\n    return(summ)\nsumFredWhile(3)\n\ndef sumFredFor(n):\n    summ = 0\n    for i in range(n):\n        summ += fred(i)\n    return(summ)\nsumFredFor(3)\n\ndef sumFredBetween(low,high):\n    print(sumFredWhile(high) - sumFredWhile(low))\nsumFredBetween(0,3)\n\ndef sumFredBetween2(a,b):\n    print(abs(sumFredWhile(b) - sumFredFor(a)))\nsumFredBetween2(0,3)\n\ndef factorPairs(n):\n    for i in range(1,int(n/2 + 1)):\n        if n % i == 0:\n            print(i)\nfactorPairs(36)\n\nimport math\n\ndef is_prime(x):\n    hi = True # assume prime until a divisor is found\n    if x<2: \n        hi = False \n    elif x == 2: \n        hi = True \n    else: \n        for n in range(2, int(x**0.5) + 1): \n            if x%n==0: \n                hi = False \n                break\n    print(hi)\nis_prime(23)\nis_prime(50)\n\ndef largestPrimeFactor(n):\n    # divide out each prime factor in turn; whatever remains above 1 is itself the largest prime factor\n    prime_factors = []\n    for i in range(2,int(n**(1/2) + 1)):\n        while n % i == 0:\n            prime_factors.append(i)\n            n = n // i\n    if n > 1:\n        prime_factors.append(n)\n\n    print(max(prime_factors))\n    \n\nis_prime(98767)\nis_prime(987127)\nis_prime(135797533)\nis_prime(2345678911)\nis_prime(12345677654381)\n\n\n","repo_name":"jsonkao/mks22","sub_path":"hw/mks22-hw0313.py","file_name":"mks22-hw0313.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"31609910250","text":"def reverse(self, x: int) -> int:\n    if x == 0:\n        return 0\n    if (x > (2 ** 31 - 1)) or (x < (-2 ** 31)):\n        return 0\n    if x < 0:\n        pop = 0 - x\n    else:\n        pop = x\n    y = ''\n    zeroAtEnd = True\n    for i in range(len(str(pop))):\n        reminder = pop % 10\n        pop = pop // 10\n        if (reminder != 0) or not zeroAtEnd:\n            zeroAtEnd = False\n            y += str(reminder)\n    if x < 0:\n        y = '-' + y\n    y = int(y)\n    if (y > (2 ** 31 - 1)) or (y < (-2 ** 31)):\n        return 0\n\n    return 
y","repo_name":"meihanhan/leetcode_solutions","sub_path":"Python/ReverseInteger.py","file_name":"ReverseInteger.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"14927552551","text":"#Question12_activity1\r\n\r\n'''\r\ns='1A2B3C45d6e7'\r\ncalculate the sum of digits\r\nnote: use for loop and isdigit() method\r\n'''\r\n#Activity-code: KL/EP-19/A-001\r\n#Platform: python 3.10,winx 10\r\n#author name=\"mr.JITHENDRAN\"\r\n#Role: Software Engineer, Apsilon\r\n\r\n\r\n\r\n\r\ns='1A2B3C45d6e7' #declare a variable 's'\r\na=0 #declare a variable accordingly\r\nfor i in s: #using for loop test the 'i'condition\r\n if i.isdigit(): #using 'if' condition check whether the 'i' has number values or not\r\n a+=int(i) #increment operator can be used and mention it as integer\r\nprint('the sum of digits is : ',a) #print the sum of digits and declare a.\r\n","repo_name":"ApsilonTech/Training","sub_path":"JithendranRD/activity1/program12@activity1.py","file_name":"program12@activity1.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"24903323400","text":"\ndef gamma(l, i):\n b = {'0': 0, '1': 0}\n for e in l:\n b[e[i]] += 1\n if b['0'] > b['1']:\n return 0\n else:\n return 1\n\n\ndef epsilon(l, i):\n b = {'0': 0, '1': 0}\n for e in l:\n b[e[i]] += 1\n if b['0'] > b['1']:\n return 1\n else:\n return 0\n\n\nnums = []\nwith open(\"3Ainput.txt\") as f:\n for line in f:\n nums.append(line.rstrip())\n\nogrcandidates = nums.copy()\nfor idx in range(len(nums[0])):\n g = gamma(ogrcandidates, idx)\n t = [e for e in ogrcandidates if int(e[idx]) == g]\n ogrcandidates = t\n if len(t) == 1:\n print(idx)\n break\nprint(ogrcandidates, int(\"\".join(ogrcandidates), 2))\n\ncsrcandidates = nums.copy()\nfor idx in range(len(nums[0])):\n g = epsilon(csrcandidates, idx)\n t = [e for e in csrcandidates if int(e[idx]) == g]\n csrcandidates = t\n if len(t) == 1:\n print(idx)\n break\nprint(csrcandidates, int(\"\".join(csrcandidates), 2))\n\n\nprint(int(\"\".join(csrcandidates), 2) * int(\"\".join(ogrcandidates), 2))\n","repo_name":"emteeoh/adventofcode2021","sub_path":"3/3B.py","file_name":"3B.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"6591904285","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\n\nscore = np.array([0.19,0.86,0.17,0.12,0.04,0.78,0.16,0.51,0.57,0.27])\nanomaly =np.array([0.0,1.0,0.0,0.0,0.0,1.0,0.0,1.0,0.0,0.0])\n\ndata0 = np.vstack((score,anomaly))\ndata1 = data0[:, np.argsort(data0[0])[::-1]]\nscore_sorted = data1[0]\nanomaly_sorted = data1[1]\nn_total = len(anomaly)\nn_norm = sum(anomaly)\nn_anom = n_total - n_norm\ncoverage = [0] * n_total\ndetection = [0] * n_total\n\nfor i in range (n_total):\n n_detectedAnom = sum(anomaly_sorted[0:i+1])\n n_detectedNorm = (n_total-i-1) - sum(anomaly_sorted[i+1:10])\n coverage[i] = n_detectedAnom / n_norm\n detection[i] = n_detectedNorm / n_anom\n\nplt.plot(score_sorted,coverage,marker = \"o\",linestyle=\"dashed\")\nplt.plot(score_sorted,detection,marker = \"o\")\nplt.show()\n","repo_name":"miurashinnosuke/ijo","sub_path":"4-1.py","file_name":"4-1.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} 
+{"seq_id":"32039826052","text":"# -*- coding: utf-8 -*-\n\n# 终止协程与异常处理: throw 和 close\n\n\"\"\"\n协程有两种终止方式: 第一提供一个 哨兵值,当send 该值是终止,然后协程会返回值。或者调用 close 方法,会传递 GeneratorExitException,\n如果没有处理则终止协程,且调用方不会报错\n\"\"\"\n\n\nclass DemoException(Exception):\n \"\"\"演示\"\"\"\n pass\n\n\ndef demo_exc_handling():\n print(\"-> coroutine started\")\n while True:\n try:\n x = yield\n except DemoException as e:\n print('-> DemoException handled Continuing')\n else:\n print('-> coroutine received :', x)\n raise RuntimeError(\"this line should never run\")\n\n\nexec_coro = demo_exc_handling()\nnext(exec_coro)\nexec_coro.send(11)\nexec_coro.send(12)\n# 发送 GeneratorExit 异常,没有处理的话会正常退出\nexec_coro.close()\n# 发送指定异常,如果没有处理则向上冒泡\n# exec_coro.throw(ZeroDivisionError)\nfrom inspect import getgeneratorstate\nprint(getgeneratorstate(exec_coro))\n\n\n# 让协程返回值\nfrom collections import namedtuple\nResult = namedtuple(\"Result\", \"count average\")\n\n\ndef averager():\n total = 0.0\n count = 0\n average = None\n while True:\n term = yield\n if term is None:\n break\n total += term\n count += 1\n average = total / count\n return Result(count, average)\n\n\nprint(\"==============\")\n\ncoro_avg = averager()\nnext(coro_avg)\ncoro_avg.send(10)\ncoro_avg.send(30)\ncoro_avg.send(6.5)\ntry:\n excepton = coro_avg.send(None)\n\n# 传递 None 后导致协程中循环 break 结束,协程结束,抛出 StopIterException\n# 然后 return 的表达式会传给调用方,将值赋给 StopIterException 的 value 属性\n# 在这里可以通过 try-catch 捕获异常后获取其 value, 而通过 yield from ,解释器不仅可以捕获 StopIterException ,还可以将\n# 其 value 属性的值变成 yield from 表达式的值\nexcept StopIteration as e:\n print(e.value)\n","repo_name":"zouyingjie/FluentPythonNote","sub_path":"cp16/demo03.py","file_name":"demo03.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"24803860552","text":"# Modules\nimport os\nimport csv\n\n# Set path for file\nfile = os.path.join(\"budget_data.csv\")\n\nmonths = []\ntotal_revenue = []\ntotal_revenue_sum = 0\n\nwith open(file) as csvfile:\n csv_data = csv.reader(csvfile, delimiter=',')\n header_out = next(csv_data, None)\n\n for row in csv_data:\n # push each month to months list\n months.append(row[0])\n # push each value to prof_loss\n total_revenue.append(int(row[1]))\n #gets len of months, print single count number as it is oulside the loop. 
\n total_months = len(months)\n #print(total_revenue)\n #sets variables to start at 0 value\n gt_revenue_increase = total_revenue[0]\n gt_revenue_decrease = total_revenue[0]\n #print (gt_revenue_decrease)\n #print (gt_revenue_decrease)\n \n #loops through each row in total_revenue list \n for row in range(0,len(total_revenue)):\n\n #gets greatest revenue increse value and month\n if total_revenue[row] >= gt_revenue_increase:\n gt_revenue_increase = total_revenue[row]\n #print(gt_revenue_increase)\n gt_revenue_increase_month = months[row]\n #gets greatest revenue descrease value and month\n elif total_revenue[row] < gt_revenue_decrease:\n gt_revenue_decrease = total_revenue[row] \n gt_revenue_decrease_month = months[row]\n \n #gets total revenue\n total_revenue_sum = total_revenue_sum + total_revenue[row]\n #print (total_revenue_sum)\n \n#Average calculation \naverage_change = round(sum(total_revenue) / len(months),2) \n\nprint(total_months)\nprint(total_revenue_sum)\nprint(average_change)\nprint(gt_revenue_increase)\nprint(gt_revenue_decrease)\nprint(gt_revenue_increase_month)\nprint(gt_revenue_decrease_month)\n#File to write results to\noutput_path = os.path.join(\"pybank_output.txt\")\n#output_path = os.path.join(\"pybank_output.csv\")\n\n\n# Open the file using \"write\" mode. Specify the variable to hold the contents\nwith open(output_path,'w') as csvfile:\n\n\n # Initialize csv.writer\n #csvwriter = csv.writer(csvfile)\n\n # Write the first row (column headers)\n csvfile.write('Financial Analysis\\n')\n csvfile.write('----------------------------' + '\\n')\n # Write the second row forwards\n csvfile.write('Total Months: ' + str(total_months) + '\\n')\n csvfile.write('Total: $' + str(total_revenue_sum) + '\\n') \n csvfile.write('Average Change: $' + str(average_change) + '\\n')\n csvfile.write('Greatest Increase in Profits: $ ' + str(gt_revenue_increase) + str(gt_revenue_increase_month) + '\\n')\n csvfile.write('Greatest Decrease in Profits: $' + str(gt_revenue_decrease) + str(gt_revenue_decrease_month) + '\\n')\n \n\n\n \n \n\n\n\n\n\n ","repo_name":"FeLowe/NW-Bootcamp-Homework","sub_path":"Python/PyBank/pybank_main.py","file_name":"pybank_main.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"38539118914","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# @Author : python_DJ\n# @contact : 185381664@qq.com\n# @Time : 2022/6/26-15:45\n# @File : threads.py\nimport time\nfrom pathlib import Path\nfrom random import randint\nfrom PyQt5.QtCore import QThread, pyqtSignal\n\n\nclass NewTaskThread(QThread):\n success = pyqtSignal(int, str, str, str)\n error = pyqtSignal(int, str, str, str)\n\n def __init__(self, *args, **kwargs):\n super(NewTaskThread, self).__init__(*args, **kwargs)\n self.row_index = None\n self.asin = None\n\n def run(self) -> None:\n \"\"\"线程任务\"\"\"\n time.sleep(3)\n self.success.emit(self.row_index, self.asin, \"标题\", \"url\")\n\n\nclass TaskThread(QThread):\n start_signal = pyqtSignal(int)\n stop_signal = pyqtSignal(int)\n count_signal = pyqtSignal(int, bool)\n\n def __init__(self, *args, **kwargs):\n super(TaskThread, self).__init__(*args, **kwargs)\n self.scheduler = None\n self.row_index = None\n self.asin = None\n self.log_file_path = None\n\n def run(self) -> None:\n \"\"\"线程任务\"\"\"\n time.sleep(3)\n self.start_signal.emit(self.row_index)\n while True:\n # scheduler 对象里面有一个terminate属性,更具它的值来判断是否结束线程\n if self.scheduler.terminate:\n # 
如果点击停止,则停止线程,并且修改当前行的状态\n self.stop_signal.emit(self.row_index)\n # self就是指当前的线程对象,调用scheduler对象里面的方法,列表移除对象本身,并return结束\n self.scheduler.destroy_thread(self)\n return\n time.sleep(randint(2, 5))\n if not Path(self.log_file_path).exists():\n print(\"日志文件不存在{}\".format(self.asin))\n Path(self.log_file_path).mkdir(parents=True, exist_ok=True)\n path = Path(self.log_file_path) / \"{}.log\".format(self.asin)\n with open(str(path),\"a\",encoding=\"utf-8\") as f:\n f.write(str(self.row_index)+\"\\n\")\n else:\n print(\"日志文件存在{}\".format(self.asin))\n path = Path(self.log_file_path) / \"{}.log\".format(self.asin)\n with open(str(path),\"a\",encoding=\"utf-8\") as f:\n f.write(str(self.row_index)+\"\\n\")\n self.count_signal.emit(self.row_index, True)\n\n\nclass StopThread(QThread):\n \"\"\"\n 停止按钮,监控线程列表的个数\n \"\"\"\n update_signal = pyqtSignal(str)\n\n def __init__(self):\n super(StopThread, self).__init__()\n self.scheduler = None\n\n def run(self) -> None:\n # 监测线程的数量\n total_count = len(self.scheduler.thread_list) # 总线程数量\n while True:\n running_count = len(self.scheduler.thread_list) # 剩余线程数量\n # 更新页面\n self.update_signal.emit(\"正在终止,存活线程:{}/{}\".format(running_count, total_count))\n if running_count == 0:\n self.update_signal.emit(\"已终止\")\n break\n time.sleep(1)\n pass\n","repo_name":"guochengwei111/amazon_qt","sub_path":"utils/threads.py","file_name":"threads.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"43824915112","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.decomposition import PCA\nfrom sklearn import preprocessing\n\nwith open('./bilibili.txt', 'r+',encoding='utf-8') as f:\n lst=[]\n for line in f.readlines():\n lst.append(line.split(','))\nX = np.array([[int(i[4]),int(i[8]),int(i[9]),int(i[10]),int(i[11]),int(i[12])] for i in lst[0:50:]])\nX_df = pd.DataFrame(X)\nscaler = preprocessing.MinMaxScaler().fit(X_df)\nX_scaler = pd.DataFrame(scaler.transform(X_df))\n# 主成分分析建模\npca = PCA(n_components=None) # n_components提取因子数量,None,返回所有主成分\npca.fit(X_scaler)\npca.explained_variance_ # 贡献方差,即特征根\npca.explained_variance_ratio_ # 方差贡献率\npca.components_ # 成分矩阵\nk1_spss = pca.components_ / np.sqrt(pca.explained_variance_.reshape(-1, 1)) # 成分得分系数矩阵\n# 确定权重\n# 求指标在不同主成分线性组合中的系数\nj = 0\nWeights = []\nfor j in range(len(k1_spss)):\n for i in range(len(pca.explained_variance_)):\n Weights_coefficient = np.sum(100 * (pca.explained_variance_ratio_[i]) * (k1_spss[i][j])) / np.sum(\n pca.explained_variance_ratio_)\n j = j + 1\n Weights.append(np.float(Weights_coefficient))\nprint('Weights',Weights)\n# 权重结果进行归一化\nWeights=pd.DataFrame(Weights)\nWeights1 = preprocessing.MinMaxScaler().fit(Weights)\nWeights2 = Weights1.transform(Weights)\nprint(Weights2)\n","repo_name":"dzxmxd/bilibili_master","sub_path":"Principal_Component_Analysis.py","file_name":"Principal_Component_Analysis.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"15"} +{"seq_id":"28269183297","text":"\n\n\n\ndef max_num(num_one, num_two, num_three):\n num_high = 0\n if num_one > num_two:\n num_high = num_one\n else: \n num_high = num_two\n if num_high < num_three:\n num_high = num_three\n print(num_high)\n\nmax_num(4, 7, 66)\n\n\n\ndef mult_list(list_one):\n add_one = 0\n increment_one = 0\n print(list_one)\n for n in list_one:\n increment_one += 1\n if increment_one == 1:\n add_one = n\n else:\n add_one *= n\n 
print(add_one)\n\nmult_list([3,2,2])\n\n\n\n\ndef rev_string(string_one):\n if len(string_one) == 0:\n\n return string_one \n\n else:\n\n return rev_string(string_one[1:]) + string_one[0]\n\n\nprint(\"Reverse: \", rev_string(\"seven seven seven nine nine nine\"))\n\n\ndef num_within(number, beg_range, end_range):\n bool_one = False\n for n in range(beg_range, end_range + 1):\n #print(n)\n if number == n:\n bool_one = True\n print(bool_one)\n\n\nnum_within(3,2,4)\nnum_within(3,1,3)\nnum_within(10, 2, 5)\n \n\n\n \ntriangle = [[1],[1,1]]\ndef pascal(n):\n #base case\n if n < 1:\n print(\"invalid number of rows\")\n elif n == 1:\n print(triangle[0])\n else:\n row_number = 2\n #fill up correct number of rows in triangle\n while len(triangle) < n:\n row = []\n row_prev = triangle[row_number - 1]\n #create correct row, then add to triangle (this row will be 1 longer than row before it)\n length = len(row_prev)+1\n for i in range(length):\n #first number is 1\n if i == 0:\n row.append(1)\n #intermediate nunmbers get added from previous rows\n elif i > 0 and i < length-1:\n row.append(triangle[row_number-1][i-1]+triangle[row_number-1][i])\n #last number is 1\n else:\n row.append(1)\n triangle.append(row)\n row_number += 1\n\n #print triangle\n for row in triangle:\n print(row)\n\npascal(2)\npascal(5)\n\n\n\n","repo_name":"ronCodeTracker/python-files-2022","sub_path":"functions_practice_part4.py","file_name":"functions_practice_part4.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"13229406567","text":"import pandas as pd\nimport numpy as np\n\ntrain_label = pd.read_csv(\"BBC_News_5_Train_Labels.csv\", header=None)\ntrain = pd.read_csv(\"BBC_News_5_Train.csv\", header=None)\ntest = pd.read_csv(\"BBC_News_5_Test.csv\", header=None)\ntest_label = pd.read_csv(\"BBC_News_5_Test_Labels.csv\", header=None)\nfeature_ = pd.read_csv(\"BBC_News_5_Features.csv\", header=None)\nfull_data = pd.read_csv(\"bbc-5categories.csv\")\n\n\ndef Rocchio_Classify_train(train, train_label):\n \"This function classified text using Rocchio Method\"\n\n merged_df = pd.merge(train, train_label, left_index=True, right_index=True)\n # merged_df=merged_df.set_index(\"0_x\")\n\n ################\n # train.head()\n indexed_train = train.set_index(0)\n indexed_train.head()\n\n # Create an empty dictionary to hold the inverted index\n inverted_index = {}\n\n # Loop through each row in the DataFrame and update the inverted index\n for i, row in merged_df.iterrows():\n train_id = row[\"0_y\"]\n if train_id not in inverted_index:\n inverted_index[train_id] = []\n inverted_index[train_id].append(merged_df[\"0_x\"][i])\n\n # Create a list of dictionaries to store the data\n\n norm_ = {}\n proto__ = {}\n for token in inverted_index:\n protoo_ = (\n indexed_train.loc[inverted_index[token]].sum(axis=0)\n ).values ##COMPUTED THE protptype for each CATEGORY\n proto__[token] = protoo_\n # Compute the norm of each array in the dictionary\n for value in proto__.values():\n norm = np.linalg.norm(value)\n norm_[token] = norm\n return norm_, proto__\n\n\ndef Rocchio_Classify_test(test_instance, train_data, train_label):\n \"This function runs test using the Rocchio_Classify_train model above\"\n\n ##Load or run the train model\n D_norm, prototypee = Rocchio_Classify_train(train_data, train_label)\n\n x_norm = np.linalg.norm(test_instance)\n # Compute the cosine SIM on the new instance and each protopype\n\n sims = {\n k: np.dot(prototypee[k], 
test_instance) / (D_norm[k] * x_norm)\n for k in prototypee\n }\n # rank the dictionary by values\n\n sorted_list = sorted(sims.items(), key=lambda x: x[1])\n\n return sorted_list[-1][0], sorted_list[-1][1]\n\n\ndef run_roochio(\n Rocchio_Classify_train, train, train_label, test, set_index=False, k=None\n):\n if set_index:\n test = test.set_index(0)\n\n if k is None:\n print(\"The default runs the first 20 documents\")\n k_values = range(0, 20, 1)\n else:\n k_values = [k]\n\n for k in k_values:\n test_instance = np.array(test.iloc[k])\n predicted_class, cosin_SIM = Rocchio_Classify_test(\n test_instance, train, train_label\n )\n print(\n f\"Test Item {k} - Predicted Class: {predicted_class} -- Actual Class: {test_label[0].iloc[k]} COSINE_SIM: {cosin_SIM}\"\n )\n return predicted_class","repo_name":"sofarikasid/CI_with_Rocchio_Method_for_Doc_Classification","sub_path":"rocchio_classify.py","file_name":"rocchio_classify.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"37493459606","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Club',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),\n ('name', models.CharField(max_length=255)),\n ('short_name', models.CharField(max_length=63)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='County',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),\n ('name', models.CharField(max_length=255)),\n ('short_name', models.CharField(max_length=63)),\n ],\n options={\n 'verbose_name_plural': 'counties',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Region',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),\n ('name', models.CharField(max_length=255)),\n ('short_name', models.CharField(max_length=63)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Round',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),\n ('name', models.CharField(max_length=255)),\n ('shoot_type', models.CharField(choices=[('I', 'Indoor Target'), ('O', 'Outdoor Target'), ('F', 'Field'), ('C', 'Clout'), ('W', 'Wand'), ('L', 'Flight'), ('O', 'Other'), ('M', 'Mixed')], max_length=1)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Series',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),\n ('name', models.CharField(max_length=255)),\n ],\n options={\n 'verbose_name_plural': 'series',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Tournament',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),\n ('name', models.CharField(max_length=511)),\n ('start_date', models.DateField()),\n ('end_date', models.DateField()),\n ('status', models.CharField(choices=[('listed', 'Listed'), ('open', 'Entries open'), ('full', 'Full'), ('ongoing', 'Happening now'), ('past', 'Completed')], max_length=31)),\n ('shoot_type', models.CharField(choices=[('I', 'Indoor Target'), ('O', 'Outdoor Target'), ('F', 
'Field'), ('C', 'Clout'), ('W', 'Wand'), ('L', 'Flight'), ('O', 'Other'), ('M', 'Mixed')], max_length=1)),\n ('record_status', models.CharField(choices=[('None', 'No record status'), ('UKRS', 'UK record status'), ('WRS', 'World record status'), ('Rose', 'Rose & UK record status'), ('AH', 'Arrowhead'), ('Tassel', 'Tassel status'), ('TUKRS', 'Tassel & UK record status')], max_length=7)),\n ('latitude', models.FloatField(null=True, blank=True)),\n ('longitude', models.FloatField(null=True, blank=True)),\n ('organising_club', models.ForeignKey(blank=True, null=True, to='tournaments.Club')),\n ('organising_county', models.ForeignKey(blank=True, null=True, to='tournaments.County')),\n ('rounds', models.ManyToManyField(to='tournaments.Round')),\n ('series', models.ForeignKey(blank=True, null=True, to='tournaments.Series')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='county',\n name='region',\n field=models.ForeignKey(blank=True, null=True, to='tournaments.Region'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='club',\n name='county',\n field=models.ForeignKey(blank=True, null=True, to='tournaments.County'),\n preserve_default=True,\n ),\n ]\n","repo_name":"mjtamlyn/bowcal","sub_path":"tournaments/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"1217601797","text":"from random import choice\n\nfrom PySide.QtCore import Qt\nfrom PySide.QtGui import QGridLayout\n\nfrom movepoints import MovePoints\nfrom movedisplay import MoveDisplay\n\nclass MovePresenter(object):\n def __init__(self, view, uistack):\n self._view = view\n self._view.reset.clicked.connect(self.reset)\n self._view.done.clicked.connect(self.done)\n\n self._model = MovePoints(6400)\n\n self._display = MoveDisplay(self._model)\n\n self._view.content.setLayout(QGridLayout())\n self._view.content.layout().addWidget(self._display)\n\n self._view.rotate.setValue(self._display.rotate)\n self._view.rotate.sliderMoved.connect(self.rotate)\n\n self._view.trail.setCheckState(Qt.Checked if self._display.trail else Qt.Unchecked)\n self._view.trail.stateChanged.connect(self.trail)\n\n self._view.step.clicked.connect(self.step)\n\n self._view.direction.sliderMoved.connect(self.direction)\n self._view.speed.sliderMoved.connect(self.speed)\n \n self._model.speed(self._view.speed.value())\n\n self._view.count.setNum(self._model.count)\n\n self._uistack = uistack\n\n def step(self):\n self._model.step()\n self._view.time.setNum(self._model.time)\n self._view.count.setNum(self._model.count)\n self._display.invalidate()\n self._view.content.update()\n\n def direction(self, value):\n self._model.direction(value)\n self._view.content.update()\n\n def speed(self, value):\n self._model.speed(value)\n self._view.content.update()\n\n def rotate(self, value):\n self._display.rotate = value\n self._view.content.update()\n\n def trail(self, state):\n self._display.trail = state == Qt.Checked\n self._display.invalidate()\n self._view.content.update()\n\n def done(self):\n self._uistack.pop()\n\n def reset(self):\n self._model.reset(self._view.direction.value(), self._view.speed.value())\n self._display.invalidate()\n 
self._view.content.update()\n","repo_name":"tps12/Tec-Nine","sub_path":"movepresenter.py","file_name":"movepresenter.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"15"} +{"seq_id":"22024245897","text":"import random\n\njogador = computador = escolhajogador = escolhacomputador = contador = 0\n#1 é Par / 2 é Ímpar\n\nwhile True:\n escolhajogador = int(input('Você quer par ou ímpar? 1(Par) ou 2(Ímpar) '))\n if escolhajogador == 1:\n escolhacomputador = 2\n elif escolhajogador == 2:\n escolhacomputador = 1\n else:\n print('Você digitou errado, tente novamente!')\n print('A escolha do computador é Par' if escolhacomputador == 1 else 'A escolha do computador é Ímpar')\n print('A escolha do jogador é Par' if escolhajogador == 1 else 'A escolha do jogador é Ímpar')\n\n computador = random.randint(1, 5)\n print(f'A jogada do computador é {computador}')\n jogador = int(input('Qual a sua jogada? '))\n print(f'A minha jogada é {jogador}')\n\n if (jogador % 2 == 0 and computador % 2 == 0) or (jogador % 2 == 1 and computador % 2 == 1):\n if escolhajogador == 1:\n contador += 1\n print(f'Parabéns! Você ganhou! E já tem {contador} vitórias.')\n if escolhajogador == 2:\n print('Que pena, você perdeu!')\n break\n if (jogador % 2 == 1 and computador % 2 == 0) or (jogador % 2 == 0 and computador % 2 == 1):\n if escolhajogador == 2:\n contador += 1\n print(f'Parabéns! Você ganhou! E já tem {contador} vitórias.')\n if escolhajogador == 1:\n print('Que pena, você perdeu!')\n break\n\n\n\n\n\n\n","repo_name":"joaopoguerra/HTML-CSS","sub_path":"Desafios/d012/ex068.py","file_name":"ex068.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"16984687012","text":"def f(it):\n i, n = 0, len(it)\n while i < n: \n yield (i, it[i])\n i += 1\nprint(*f(list(\"csikóhal\")))\n\n# vagy:\nprint(*enumerate(list(\"csikóhal\")))\n\n# használata:\nl = \"első,második,harmadik,negyedik,ötödik,hatodik\".split(\",\")\nprint(*[f'{i + 1}.: {x}' for i, x in enumerate(l)], sep=\"\\n\")","repo_name":"tomuwhu/tig","sub_path":"py/enum.py","file_name":"enum.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"hu","doc_type":"code","stars":2,"dataset":"github-code","pt":"15"} +{"seq_id":"27616801625","text":"#!/usr/bin/env python\nfrom .reporter import Reporter\nfrom .phuzzers import Phuzzer\nimport pkg_resources\nimport logging.config\nimport importlib\nimport argparse\nimport tarfile\nimport shutil\nimport socket\nimport time\nimport imp\nimport os\n\ntry:\n import driller\n DRILLER_EXISTS = True\nexcept ImportError:\n DRILLER_EXISTS=False\n\nfrom . 
import GreaseCallback\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Shellphish fuzzer interface\")\n parser.add_argument('binary', help=\"the path to the target binary to fuzz\")\n parser.add_argument('-g', '--grease-with', help=\"A directory of inputs to grease the fuzzer with when it gets stuck.\")\n parser.add_argument('-d', '--driller_workers', help=\"When the fuzzer gets stuck, drill with N workers.\", type=int)\n parser.add_argument('-f', '--force_interval', help=\"Force greaser/fuzzer assistance at a regular interval (in seconds).\", type=float)\n parser.add_argument('-w', '--work-dir', help=\"The work directory for AFL.\", default=\"/dev/shm/work/\")\n\n parser.add_argument('-l', '--login-data', help=\"The json file from which to get the login information\", default=\"\")\n parser.add_argument('-c', '--afl-cores', help=\"Number of AFL workers to spin up.\", default=1, type=int)\n parser.add_argument('-C', '--first-crash', help=\"Stop on the first crash.\", action='store_true', default=False)\n parser.add_argument('-Q', '--use-qemu', help=\"Use qemu to trace binary.\", action='store_true', default=False)\n parser.add_argument('-t', '--timeout', help=\"Timeout (in seconds).\", type=float, default=None)\n parser.add_argument('-i', '--ipython', help=\"Drop into ipython after starting the fuzzer.\", action='store_true')\n parser.add_argument('-T', '--tarball', help=\"Tarball the resulting AFL workdir for further analysis to this file -- '{}' is replaced with the hostname.\")\n parser.add_argument('-m', '--helper-module',\n help=\"A module that includes some helper scripts for seed selection and such.\")\n parser.add_argument('-D', '--dictionary', default=None,\n help=\"Load the dictionary from a file, with each on a single line \")\n parser.add_argument('--memory', help=\"Memory limit to pass to AFL (MB, or use k, M, G, T suffixes)\", default=\"8G\")\n parser.add_argument('--no-dictionary', help=\"Do not create a dictionary before fuzzing.\", action='store_true', default=False)\n parser.add_argument('--logcfg', help=\"The logging configuration file.\", default=\".shellphuzz.ini\")\n parser.add_argument('-s', '--seed-dir', action=\"append\", help=\"Directory of files to seed fuzzer with\")\n parser.add_argument('--run-timeout', help=\"Number of milliseconds permitted for each run of binary\", type=int, default=None)\n parser.add_argument('--driller-timeout', help=\"Number of seconds to allow driller to run\", type=int, default=10*60)\n parser.add_argument('--length-extension', help=\"Try extending inputs to driller by this many bytes\", type=int)\n parser.add_argument('--target-opts', help=\"Options to pass to target.\", default=None, nargs='+')\n parser.add_argument('-r', '--resume', help=\"Resume prior run if possible and do not destroy work directory.\",\n action='store_true', default=False)\n parser.add_argument('--reportdir', help=\"The directory to use for the reports.\", default=\".\")\n parser.add_argument('-p','--phuzzer-type', '--fuzzer-type', help=\"Which phuzzer are you using: AFL, AFL_IJON, AFL++, Witcher, AFL_MULTICB.\", default=Phuzzer.AFL)\n args = parser.parse_args()\n\n if os.path.isfile(os.path.join(os.getcwd(), args.logcfg)):\n logging.config.fileConfig(os.path.join(os.getcwd(), args.logcfg))\n\n try: os.mkdir(\"/dev/shm/work/\")\n except OSError: pass\n\n if args.helper_module:\n try:\n helper_module = importlib.import_module(args.helper_module)\n except (ImportError, TypeError):\n helper_module = imp.load_source('fuzzing_helper', 
args.helper_module)\n else:\n helper_module = None\n\n drill_extension = None\n grease_extension = None\n\n if args.grease_with:\n print (\"[*] Greasing...\")\n grease_extension = GreaseCallback(\n args.grease_with,\n grease_filter=helper_module.grease_filter if helper_module is not None else None,\n grease_sorter=helper_module.grease_sorter if helper_module is not None else None\n )\n\n if args.driller_workers and DRILLER_EXISTS:\n print (\"[*] Drilling...\")\n drill_extension = driller.LocalCallback(num_workers=args.driller_workers, worker_timeout=args.driller_timeout, length_extension=args.length_extension)\n\n stuck_callback = (\n (lambda f: (grease_extension(f), drill_extension(f))) if drill_extension and grease_extension\n else drill_extension or grease_extension\n )\n\n seeds = None\n if args.seed_dir:\n seeds = []\n print (\"[*] Seeding...\")\n for dirpath in args.seed_dir:\n for filename in os.listdir(dirpath):\n filepath = os.path.join(dirpath, filename)\n if not os.path.isfile(filepath):\n continue\n with open(filepath, 'rb') as seedfile:\n seeds.append(seedfile.read())\n\n if args.dictionary:\n built_dict = open(args.dictionary,\"rb\").read().split(b\"\\n\")\n else:\n built_dict = None\n\n print (\"[*] Creating fuzzer...\")\n fuzzer = Phuzzer.phactory(phuzzer_type=args.phuzzer_type,\n target=args.binary, work_dir=args.work_dir, seeds=seeds, afl_count=args.afl_cores,\n create_dictionary=not args.no_dictionary, timeout=args.timeout,\n memory=args.memory, run_timeout=args.run_timeout, dictionary=built_dict, use_qemu=args.use_qemu,\n resume=args.resume, target_opts=args.target_opts\n )\n\n # start it!\n print (\"[*] Starting fuzzer...\")\n fuzzer.start()\n start_time = time.time()\n\n reporter = Reporter(args.binary, args.reportdir, args.afl_cores, args.first_crash, args.timeout, fuzzer.work_dir )\n\n reporter.start()\n\n if args.ipython:\n print (\"[!]\")\n print (\"[!] Launching ipython shell. Relevant variables:\")\n print (\"[!]\")\n print (\"[!] fuzzer\")\n if args.driller_workers and DRILLER_EXISTS:\n print (\"[!] driller_extension\")\n if args.grease_with:\n print (\"[!] grease_extension\")\n print (\"[!]\")\n import IPython; IPython.embed()\n\n try:\n loopcnt = 0\n #print (\"[*] Waiting for fuzzer completion (timeout: %s, first_crash: %s).\" % (args.timeout, args.first_crash))\n crash_seen = False\n reporter.enable_printing()\n\n while True:\n\n if not crash_seen and fuzzer.found_crash():\n # print (\"\\n[*] Crash found!\")\n crash_seen = True\n reporter.set_crash_seen()\n if args.first_crash:\n break\n if fuzzer.timed_out():\n reporter.set_timeout_seen()\n print(\"\\n[*] Timeout reached.\")\n break\n\n time.sleep(1)\n loopcnt += 1\n\n except KeyboardInterrupt:\n end_reason = \"Keyboard Interrupt\"\n print (\"\\n[*] Aborting wait. Ctrl-C again for KeyboardInterrupt.\")\n except Exception as e:\n end_reason = \"Exception occurred\"\n print (\"\\n[*] Unknown exception received (%s). 
Terminating fuzzer.\" % e)\n fuzzer.stop()\n if drill_extension:\n drill_extension.kill()\n raise\n\n print (\"[*] Terminating fuzzer.\")\n reporter.stop()\n\n fuzzer.stop()\n if drill_extension:\n drill_extension.kill()\n\n if args.tarball:\n print (\"[*] Dumping results...\")\n p = os.path.join(\"/tmp/\", \"afl_sync\")\n try:\n shutil.rmtree(p)\n except (OSError, IOError):\n pass\n shutil.copytree(fuzzer.work_dir, p)\n\n tar_name = args.tarball.replace(\"{}\", socket.gethostname())\n\n tar = tarfile.open(\"/tmp/afl_sync.tar.gz\", \"w:gz\")\n tar.add(p, arcname=socket.gethostname()+'-'+os.path.basename(args.binary))\n tar.close()\n print (\"[*] Copying out result tarball to %s\" % tar_name)\n shutil.move(\"/tmp/afl_sync.tar.gz\", tar_name)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"angr/phuzzer","sub_path":"phuzzer/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":8416,"program_lang":"python","lang":"en","doc_type":"code","stars":144,"dataset":"github-code","pt":"15"} +{"seq_id":"8894353205","text":"from flask.ext.wtf import Form\nfrom wtforms import TextField,BooleanField,validators,DateField,IntegerField,TextAreaField\n#from wtforms.validators import required\n\nclass LoginForm(Form):\n\topenid = TextField('openid', [validators.required()])\n\tremember_me = BooleanField('remember_me',default = False)\n\nclass SignUpForm(Form):\n\tusername = TextField('Username',[validators.required()])\n\temail = TextField('email', [validators.Email(),validators.Length(min=6, max=120)])\n\t#email = TextField('Email', [validators.Length(min=6, max=120), validators.Email(),validators.Required])#error\n\nclass TaxForm(Form):\n\tstartdate = DateField('startdate',[validators.required()])\n\tenddate = DateField('enddate',[validators.required()])\n\tsalary = IntegerField('salary',[validators.required()])\n\nclass EditForm(Form):\n\tnickname = TextField('nickname',[validators.required()] )\n\tabout_me = TextAreaField('about_me',[validators.Length(min=0,max=120)])\n","repo_name":"sebin7tony/mircoblog-example","sub_path":"app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"14772761786","text":"#----------------------------------------------------------------------------\n# Created By : Leonardo Citraro leonardo.citraro@epfl.ch\n# Date: 2021\n# ---------------------------------------------------------------------------\nimport sys\nimport os\nimport numpy as np\nimport networkx as nx\nimport imageio\nimport subprocess\nimport cv2\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\nfrom .utils import mkdir\nfrom ..types import Detection, DetectionTracklet\n\ncolors = [[255,0,0], [0,255,0], \n [100,100,255], [255,255,0], \n [0,255,255], [255,0,255],\n [225,225,225], [0,0,0],\n [128,128,128], [50,128,50]]+[np.random.randint(0,255,3).tolist() for _ in range(1000)] \n\ndef draw_points(image, centers, radius, color='r'): \n \"\"\" Draws filled point on the image\n \"\"\"\n _image = image.copy() \n if color=='r':\n color = [255,0,0]\n elif color=='g':\n color = [0,255,0]\n elif color=='b':\n color = [0,0,255]\n elif color=='w':\n color = [255,255,255]\n elif color=='k':\n color = [0,0,0]\n \n for point in centers:\n _image = cv2.circle(_image, tuple(point.astype(np.int)), radius, color=color, thickness=-1)\n return _image\n\ndef draw_rectangles(image, centers, size, color='r', thickness=3): \n \"\"\" Draws rectangles on the image\n \"\"\" 
\n _image = image.copy()\n if color=='r':\n color = [255,0,0]\n elif color=='g':\n color = [0,255,0]\n elif color=='b':\n color = [0,0,255]\n elif color=='w':\n color = [255,255,255]\n elif color=='k':\n color = [0,0,0]\n \n for i, (x,y) in enumerate(np.int_(centers)):\n pt1 = (x-size[1]//2, y-size[0]//2)\n pt2 = (x+size[1]//2, y+size[0]//2)\n _image = cv2.rectangle(_image, pt1, pt2, color=color, thickness=thickness)\n return _image\n\ndef plot_graph(graph, node_size=100, font_size=12, \n node_color='y', edge_color='y', \n linewidths=2,\n offset=np.array([0,0]), \n source_pos=None, \n target_pos=None, \n show_source_sink_nodes=True,\n verbose=True, **kwargs):\n \n graph_ = graph.copy()\n \n if verbose:\n if len(graph_.nodes())>500:\n print(\"The graph is big. Plotting it may take a while.\")\n \n positions = {}\n ps = []\n for n in graph_.nodes():\n if 'detection' in graph_.nodes[n]:\n if isinstance(graph_.nodes[n]['detection'], DetectionTracklet):\n positions[n] = np.mean([d.position[:2] for d in graph_.nodes[n]['detection'].tracklet], axis=0)\n elif isinstance(graph_.nodes[n]['detection'], Detection):\n positions[n] = graph_.nodes[n]['detection'].position[:2]\n else:\n raise RuntimeError(\"Detection object must inherit from Detection or DetectionTracklet not '{}'!\".format(type(graph_.nodes[n]['detection']))) \n ps.append(positions[n]) \n ps = np.array(ps)\n\n xmin, ymin = ps.min(0)\n xmax, ymax = ps.max(0)\n w,h = (xmax-xmin), (ymax-ymin)\n \n nodes = graph_.nodes()\n SOURCE = min(nodes)\n SINK = max(nodes) \n \n if not show_source_sink_nodes:\n graph_.remove_node(SOURCE)\n graph_.remove_node(SINK)\n\n pos = {}\n for n in graph_.nodes():\n node = graph_.nodes[n]\n if node['label']=='source':\n p = source_pos if source_pos is not None else np.array([xmin-w*0.15, ymin-h*0.15])\n elif node['label']=='sink':\n p = target_pos if target_pos is not None else np.array([xmax+w*0.15, ymax+h*0.15]) \n elif node['label']=='pre-node':\n p = positions[n]-np.array([w*0.025, 0])\n elif node['label']=='post-node':\n p = positions[n]+np.array([w*0.025, 0])\n \n pos[n] = p+offset\n \n nx.draw_networkx(graph_, pos=pos, node_size=node_size, node_color=node_color,\n edge_color=edge_color, font_size=font_size, **kwargs)\n #plt.gca().invert_yaxis()\n plt.legend() \n \ndef plot_trajectories(trajectories, axis=(0,1), linewidth=2, nodesize=7, \n display_time=False, fontsize=8, display_time_every=1, \n filter_index=None, calibration=None):\n import matplotlib.pyplot as plt\n\n for track,color in zip(trajectories, colors):\n color = tuple(c/255.0 for c in color)\n positions = []\n times = []\n for detection in track:\n \n if isinstance(detection, DetectionTracklet):\n positions_ = [d.position for d in detection.tracklet]\n time_ = [d.index for d in detection.tracklet]\n elif isinstance(detection, Detection):\n positions_ = [detection.position]\n time_ = [detection.index]\n else:\n raise RuntimeError(\"Detection object must inherit from Detection or DetectionTracklet not '{}'!\".format(type(detection))) \n \n if filter_index is None:\n for p,t in zip(positions_, time_):\n positions.append(p)\n times.append(t) \n else:\n for p,t in zip(positions_, time_):\n if t >= filter_index[0] and t0:\n\tif endlop=='end':\n\t\tbreak\n\tprint('kiouio')\n\tfor stir in a[-1::-1]:\n\t\tif endlop=='end':\n\t\t\tbreak\n\t\taddlasnum=''\n\t\tprint('jk',a,stir)\n\t\tif not stir.isnumeric():\n\t\t\tprint('hi')\n\t\t\tfor exloc in range(len(a)):\n\t\t\t\tif a[exloc]==stir:\n\t\t\t\t\tlocstir=exloc\t\t\t#last location\n\t\t\tfor antalp in 
a[locstir-1::-1]:\n\t\t\t\t\n\t\t\t\tif not antalp.isnumeric() and antalp!='.':\n\t\t\t\t\tfor exloc1 in range(len(a)):\n\t\t\t\t\t\tif a[exloc1]==antalp:\n\t\t\t\t\t\t\tlocstir1=exloc1\t\t\t#last location\n\t\t\t\t\tnow12=a[locstir1+1:locstir+1]\n\t\t\t\t\t\n\t\t\t\t\tappendst.append(now12)\n\t\t\t\t\ta=a.replace(a[locstir1+1:locstir+1],'')\n\t\t\t\t\tprint('appen',appendst)\n\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\taddlasnum=antalp+addlasnum\n\t\t\t\t\tprint('jakass',a,addlasnum)\n\t\t\t\t\tif len(addlasnum)+1==len(a):\n\t\t\t\t\t\tappendst.append(a)\n\t\t\t\t\t\tprint('yes')\n\t\t\t\t\t\tendlop='end'\nprint(appendst,'koi')\n\nfor jk in appendst:\n\tprint('hi')\n\taddpi=''\n\tfor kj in jk[-1::-1]:\n\t\t\n\t\tif not kj.isnumeric() and kj!='.':\n\t\t\t\n\t\t\tprint(kj)\n\t\t\tlockj=jk.find(kj)\n\t\t\tpi=jk[ :lockj]\n\t\t\t\n\t\t\taddpi=pi+addpi \n\t\t\tprint(addpi)\t\t\t\n","repo_name":"komerajaya456/All_2python","sub_path":"days ,y t,(num,alp)both append.py","file_name":"days ,y t,(num,alp)both append.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"5320233782","text":"from google.cloud import bigquery\n\n\ndef table_exists(table_id):\n \"\"\"Test if table exists on BQ.\n\n table_id = project_id.dataset.table_name\n\n \"\"\"\n client = bigquery.Client()\n try:\n client.get_table(table_id)\n return True\n except Exception as err:\n err\n return False\n\n\ndef upload_df_to_bq(table_id, df, replace=True):\n \"\"\"Upload a DataFrame to a BQ table.\n\n table_id = \"project_id.dataset.table_name\"\n\n BQ schema/types will be inferred from dataframe.\n Use pd.Timestamp(time_str) for columns w/date type.\n\n \"\"\"\n print('Uploading to BQ ...')\n client = bigquery.Client()\n\n w = \"WRITE_TRUNCATE\" if replace else \"WRITE_APPEND\"\n config = bigquery.LoadJobConfig(write_disposition=w)\n\n client.load_table_from_dataframe(\n df, table_id, job_config=config\n ).result() # waits for job to complete\n\n # check loaded table\n table = client.get_table(table_id)\n trows = table.num_rows\n drows = len(df)\n print(f\"{drows} rows loaded to table ({trows} total rows)\")\n","repo_name":"GlobalFishingWatch/paper-industrial-activity","sub_path":"nnets/utils/bq.py","file_name":"bq.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"29717742729","text":"\nimport datetime\n# I would like all dates and periods of time as datetime and timedelta objects\n\nclass Item:\n\n def __init__(self, name, id):\n\n self.name = name\n self.id = id\n self.checkout_date = None\n # initialise attributes common to all items\n\n def __del__(self):\n\n # delete an item\n pass\n\n def setdate(self, date=None):\n \"\"\"\n Set the date attribute of item to given date (on which item is checked out)\n :param date: datetime object of today's date\n \"\"\"\n if date == None:\n date = datetime.date.today()\n\n self.checkout_date = date\n # set date that item is checked out\n\n def checkin_item(self, date=None):\n \"\"\"\n Calculate fine resultant from item being overdue (if any)\n Set checkout_date to None, as item is no longer checked out\n :param date: datetime object of today's date\n :return: float, fine in pounds to be added to user's accrued fine due to the overdue return of this item\n \"\"\"\n if date == None:\n date = datetime.date.today()\n\n fine = self.calculate_fine(date)\n\n self.checkout_date = None\n 
# now the item is returned, reset checkout date to None\n\n return fine\n # the method checkin_item will return to the caller the fine which needs to be added to the User's fine accrued\n # if the item was not overdue this will return zero (so it may as well be added)\n\n def find_days_overdue(self, date=None):\n \"\"\"\n Calculate number of days by which item is overdue\n If item is not overdue the result is zero or negative\n :param date: datetime object of today's date\n :return: int, number of days overdue\n \"\"\"\n if date == None:\n date = datetime.date.today()\n\n days_overdue = (date - self.checkout_date) - self.lend_time\n # calculate number of days item is overdue (may be negative)\n # will be timedelta object\n\n return days_overdue.days\n # return whole number of days\n\n def is_overdue(self, date=None):\n \"\"\"\n Determine whether item is overdue\n :param date: datetime object of today's date\n :return: bool, if item is overdue\n \"\"\"\n if date == None:\n date = datetime.date.today()\n\n days_overdue = self.find_days_overdue(date)\n\n if days_overdue > 0:\n\n overdue = True\n\n else:\n\n overdue = False\n\n return overdue\n # if it is positive number of days overdue, then overdue is True\n\n def calculate_fine(self, date=None):\n \"\"\"\n Determine overdue fine from item at date\n :param date: datetime object of today's date\n :return: float, fine in pounds due to the overdue return\n \"\"\"\n if date == None:\n date = datetime.date.today()\n\n days_overdue = self.find_days_overdue(date)\n\n fine = 0\n\n if self.is_overdue(date):\n\n fine = self.fine_rate * days_overdue\n # calculate fine due on item\n\n return fine\n\n def is_onloan(self):\n \"\"\"\n Determine whether item is on loan\n :return: bool, if item is on loan\n \"\"\"\n\n if self.checkout_date == None:\n\n onloan = False\n\n elif isinstance(self.checkout_date, datetime.date):\n # datetime.datetime is a subclass of datetime.date, so this covers\n # both the date set by setdate() and full datetime objects\n\n onloan = True\n\n return onloan\n # if it has a checkout date, it is on loan. If it doesn't, it is not
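\n\n# Illustrative usage (hypothetical example, not part of the original module):\n#     dvd = DVD(\"Alien\", 7)\n#     dvd.setdate(datetime.date(2024, 1, 1))\n#     dvd.calculate_fine(datetime.date(2024, 1, 15))   # 14 days out on a 1-week loan -> 7 * 2 = 14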
\n\n\nclass Book(Item):\n\n lend_time = datetime.timedelta(weeks = 4)\n fine_rate = .5\n # fine rate 50p per day, loan period 4 weeks\n\n\nclass DVD(Item):\n\n lend_time = datetime.timedelta(weeks=1)\n fine_rate = 2\n # fine rate 200p per day, loan period 1 week\n\n\nclass Journal(Item):\n\n lend_time = datetime.timedelta(weeks=2)\n fine_rate = 1\n # fine rate 100p per day, loan period 2 weeks\n\n\n\n\n\n","repo_name":"purplecat7/PyCharmProjects","sub_path":"Level2/LibSys_2019_animalbaste/source/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":3880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"29238613566","text":"#I couldn't test it because there is no Python on the CDI computers, so it's not certain that it works; don't forget to look at the readme I put in branch 1.0, otherwise there will inevitably be errors\nimport tkinter as tk\nimport Terrain\nimport Click\nimport Carte\nimport Main\nimport Deck\nimport Joueur\nimport sqlite3 as sq\nimport Methodes_utiles as mu\n\nclass Game:\n \"\"\"Class managing all the actions that do not relate directly to the players (messages, etc...)\"\"\"\n #GAME attributes\n _terrain = None #Terrain object\n _joueurs = None #Joueur objects\n _joueur_tour = None #Int, turn of the current player 1 : p1 // 2 : p2\n _click = None #Click object\n _cards = None #DATABASE of the cards\n _roi_joueur = None #Carte object\n _roi_adverse = None\n _decks_names = None\n _decks_cards = None\n #Graphics attributes\n _fen = None\n _bouton_fin_tour = None\n\n def __init__(self):\n \"\"\" Initializer of the Game class \"\"\"\n self.create_card_database() #Loads the card db\n self.create_deck_database() #Loads the deck db\n self._bouton_fin_tour = tk.Button(self._fen,text=\"Fin du tour\",command=self.switch_turn) #Creates the end-of-turn button (\"Fin du tour\" = \"End of turn\")\n self._bouton_fin_tour.grid(column=1,row=3) #Grid it (pack)
\n \n def start(self):\n \"\"\" Method that will be called to launch a Game \"\"\"\n #Creation of the terrain\n taille_terrain = 8 #The size of the terrain (adjustable)\n self._terrain = Terrain.Terrain(taille_terrain,taille_terrain,self.create_terrain_points(taille_terrain),self) #Creation of the terrain object\n #Creation of the objects\n self._click = Click.Click(self) #Creation of the click object\n self._joueurs = [[],[],[]] #Initialization of the players\n #Creation of the decks\n self._decks = self.create_deck_database() #\n deck1 = self._decks_cards[0] #Creates the first deck from the db (the one at index 0)\n deck2 = self._decks_cards[0] #Creates the second deck from the db (the one at index 0)\n #Note: these decks are loaded with the card indices (1,2,3,4,5,...) and not with card objects\n d1 = []\n d2 = []\n for i in range(30): # Creation of the card objects from the card indices of each deck\n d1.append(self.create_card(deck1[i],1))\n d2.append(self.create_card(deck2[i],2))\n #Creation of the deployment zones\n zones_deploiement1 = []\n zones_deploiement2 = []\n for i in range(taille_terrain):\n zones_deploiement2.append([i,0]) #For player 2 : the rows at index 0 and 1\n zones_deploiement2.append([i,1])\n zones_deploiement1.append([i,taille_terrain-2]) #For player 1 : the rows at index 6 and 7 (for a terrain of size 8)\n zones_deploiement1.append([i,taille_terrain-1])\n #Creation of the players\n self._joueurs[1] = Joueur.Joueur(1,\"Joueur1\",d1,[],zones_deploiement1,self)\n self._joueurs[2] = Joueur.Joueur(2,\"Joueur2\",d2,[],zones_deploiement2,self)\n #Creation of the king cards\n self._roi_joueur = self.create_card(0,1) #Index + owner\n self._roi_adverse = self.create_card(0,2)\n #Setting up the kings for each player\n self._joueurs[1].focus_roi(self._roi_joueur)\n self._joueurs[2].focus_roi(self._roi_adverse)\n #Positioning the kings on the terrain\n milieu = int(self._terrain._taille/2)\n marge = (1 - self._terrain._taille%2)\n self._terrain.add_card(milieu,0,self._joueurs[2]._roi)\n self._terrain.add_card(milieu-marge,-1,self._joueurs[1]._roi)\n #Graphical display\n self._terrain.display()#Displays the grid that will handle the click events\n self._joueurs[1]._main.display()#Displays player 1's hand\n self._joueurs[2]._main.display()#Displays player 2's hand\n self._joueurs[1]._deck.shuffle()#Shuffles player 1's deck\n self._joueurs[2]._deck.shuffle()#Shuffles player 2's deck\n # Starting cards in hand\n for i in range(2): #Each player draws 2 cards\n self._joueurs[1].pioche()\n self._joueurs[2].pioche()\n self._joueurs[2].pioche() #Player 2, who will play second, starts with one extra card\n # Player 1's turn\n self.tour_joueur(1) # Move on to player 1's turn
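\n \n # Illustrative shape of a row in decks.sq3 (hypothetical values), matching\n # the \"deck = [name, list of the cards]\" layout described in create_deck_database:\n #     (\"Starter\", \"[1, 2, 3, 4, 5, ...]\")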
\n \n def create_deck_database(self):\n \"\"\" Loads the db into the _deck_names and _decks_cards attributes \"\"\"\n cnx = sq.connect(\"decks.sq3\") #Open the db\n curseur = cnx.cursor() #Create a cursor to walk through the db\n curseur.execute(\"SELECT * FROM deck\") #Select the whole db\n datas = curseur.fetchall()#Load the db into 'datas'\n self._decks_names = [] #Declare the attributes as lists so that 'append' can be used\n self._decks_cards = []\n for i in range(len(datas)): #Walk through the db data (loaded into datas) and add, deck by deck, their name and data to the _deck_names and _deck_cards attributes. The decks in the db have this form: deck = [name, list of the cards]\n self._decks_names.append(list(datas)[i][0]) #The name is at the start of the deck when it is created (index 0)\n self._decks_cards.append(eval(list(datas)[i][1])) #The cards of the deck are in a list at index 1.\n print(\"DECK DATABASE LOADED\",self._decks_names)#Report that the db is loaded and show the names of the decks\n cnx.close() #Close the db (we no longer need it)\n \n def create_card_database(self):\n \"\"\" Creation of the table containing all the cards \"\"\"\n #Same as with the decks\n self._cards = []\n cnx = sq.connect(\"cartes.sq3\")\n curseur = cnx.cursor()\n curseur.execute(\"SELECT * FROM cartes\")\n datas = curseur.fetchall()\n for i in range(len(datas)):\n self._cards.append(list(datas[i]))\n self._cards[i][4] = eval(self._cards[i][4])#The patterns are stored as text, so we evaluate it as Python code so that it turns into a list\n self._cards[i][5] = eval(self._cards[i][5])#same\n print(\"CARD DATABASE LOADED\",self._cards)\n\n def create_card(self,index,proprietaire):\n \"\"\" Creation of a card object \"\"\"\n #Load the card data from the db according to the given index\n nom = self._cards[index][0]\n mana = self._cards[index][1]\n attaque = self._cards[index][2]\n vie = self._cards[index][3]\n patern_atk = self._cards[index][4]\n patern_mvt = self._cards[index][5]\n sprite = self._cards[index][6]\n jeton = self._cards[index][7]\n sprite_dos = self._cards[index][8]\n return Carte.Carte(self,nom,mana,attaque,vie,patern_atk,patern_mvt,jeton,sprite,sprite_dos,proprietaire) #Return the card with all the attributes matching the db (we could thus create a new card from here without it being in the db if we modified the code)
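\n\n # Illustrative shape of a row in cartes.sq3 (hypothetical values), matching the\n # column order consumed by create_card above (name, mana, attack, life,\n # attack pattern, movement pattern, sprite, token, back sprite):\n #     (\"Gobelin\", 2, 3, 2, \"[[0,1]]\", \"[[1,0]]\", \"gobelin.png\", 0, \"dos.png\")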
\n\n def create_terrain_points(self,taille_terrain):\n \"\"\" Automatically creates a conventional terrain of mana points \"\"\"\n tabl1 = mu.array(taille_terrain,taille_terrain,1) #Method in Methodes_utiles (imported at the start of the program) that creates an array of size 'taille_terrain'*'taille_terrain' with a value of 1 for each cell\n tabl2 = mu.array(taille_terrain/2,taille_terrain/2,1)#same kind\n tabl3 = mu.array(2-taille_terrain%2,2-taille_terrain%2,1)#same kind\n a = 0\n b = 0\n for j in range(int(taille_terrain/4),int(3*taille_terrain/4)):#Add the 2nd array to the 1st so that it is centered\n for i in range(int(taille_terrain/4),int(3*taille_terrain/4)):\n tabl1[j][i] += tabl2[a][b]\n a += 1\n b += 1\n a = 0\n a = 0\n b = 0\n for j in range(int(taille_terrain/2-len(tabl3)/2),int(taille_terrain/2+len(tabl3)/2)): #Add the 3rd array to the sum of the first 2 so that it is centered as well\n for i in range(int(taille_terrain/2-len(tabl3)/2),int(taille_terrain/2+len(tabl3)/2)):\n tabl1[j][i] += tabl3[a][b]\n a += 1\n b += 1\n a = 0\n return tabl1 #Return the sum of the 3 arrays. We could also make a custom one\n \n def tour_joueur(self,index):\n \"\"\" Changes whose turn it is \"\"\"\n if index == 1: #Start player 1's turn\n self._joueur_tour = 1 #Record it in the attribute\n self._joueurs[1].begin_tour() #Begin player 1's turn\n self._joueurs[2].end_tour() #End player 2's turn\n else: #Same logic below\n self._joueur_tour = 2\n self._joueurs[2].begin_tour()\n self._joueurs[1].end_tour()\n \n def switch_turn(self): #Swap the turns\n \"\"\" Swaps the turns \"\"\"\n if self._joueurs[1]._tour == True: #If it is player 1's turn, move on to player 2's with the previous method\n self.tour_joueur(2)\n else:# Otherwise, do the opposite\n self.tour_joueur(1)\n \ngame = Game()\ngame.start()\n","repo_name":"Graynyn/Game","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":9277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"2203430311","text":"import irc\nimport sys\nfrom irc import client\n\n\nmy_nickname = \"daytona6751\"\nchannels = \"#root-me_challenge\"\n\nc= irc.client.ServerConnection(irc.client.Reactor())\nc.buffer_class = irc.buffer.LineBuffer\nc.buffer_class.errors = 'replace'\ntry:\n b = c.reactor.server().connect(\"irc.root-me.org\",6667,my_nickname,channels)\nexcept irc.client.ServerConnectionError as x:\n print(x)\n sys.exit(1)\nb.join(\"#root-me_challenge\")\nb.is_connected()\nb.privmsg(\"daytona675\",\"hello\")\nb.socket.recvmsg(2048)\n\n","repo_name":"J4son/python","sub_path":"irc_client.py","file_name":"irc_client.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"10174793203","text":"import sys\n\n# sys.stdin = open(\"input.txt\", \"r\")\nn = int(input())\n\nfor i in range(n):\n a = str(input())\n a = a.upper()\n for j in range(0, int(len(a)/2) + 1, 1):\n str_leng = int(len(a)) - 1\n if(a[j] != a[str_leng - j]):\n print(\"#%d NO\" %(i + 1))\n break\n if(j == int(len(a)/2)):\n print(\"#%d YES\" %(i + 1))\n","repo_name":"IngenieurSong/algorithm_study","sub_path":"search/inflearn/1/answer_1.py","file_name":"answer_1.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"37530783358","text":"import sys\nimport math\nimport random\nimport heapq\nimport cPickle\nimport Orange\nimport neural\nfrom itertools import imap\nfrom operator import itemgetter\nfrom Orange.evaluation.testing import learn_and_test_on_test_data, test_on_data\nfrom Orange.classification.svm import kernels\n\nsys.path.append('../')\n\nfrom utils.cSimilarity import *\nfrom utils.ensemble import *\nfrom utils.distribution import *\nimport sdistances\nimport evaluate\n\nclass OrangeRandom(random.Random):\n    \n    def __init__(self, orngRand):\n        self.rand = orngRand\n    \n    def randint(self, a, b):\n        if b <= a:\n            return 0\n        return self.rand(b - a) + a\n\nrand = Orange.misc.Random(0)\n\n# Global options\nMETRIC = euclidean\nLEARNING_PROPORTION = 0.7\nGENERALIZATION_PROPORTION = 0.5\nGENERALIZED_SETS = 20\nLEARN_SUBSETS = [1 - math.log(x, 11) for x in xrange(10, 0, -1)] # Log scale\nHDISTANCES = [i*0.05 for i in xrange(18)] + [i*0.025 for i in xrange(36,41)]\nSAMPLE_SIZE = 10\nFEATURE_SUBSETS = [1.0]\nLEARNERS = [Orange.classification.bayes.NaiveLearner(name=\"bayes\"),\n Orange.classification.knn.kNNLearner(name=\"knn\"),\n Orange.classification.svm.SVMLearner(kernel_type=kernels.RBF,\n 
name=\"svm_rbf\"),\n Orange.classification.svm.SVMLearner(kernel_type=kernels.Linear,\n name=\"svm_linear\"),\n Orange.classification.svm.SVMLearner(kernel_type=kernels.Polynomial,\n name=\"svm_polynomial\"),\n Orange.classification.svm.SVMLearner(kernel_type=kernels.Sigmoid,\n name=\"svm_sigmoid\"),\n Orange.classification.tree.SimpleTreeLearner(name=\"tree\"),\n neural.NeuralNetworkLearner(name=\"neural_net\",\n rand=OrangeRandom(rand)),\n Orange.classification.majority.MajorityLearner(name=\"majority\")\n ]\nLEARNERS_NAMES = [l.name for l in LEARNERS] + [\"vote\", \"wcs\", \"current_best\"]\n\ndef select_random_features(data, test_data, n, random_generator=Orange.misc.Random(0)):\n \"\"\"\n Returns new data table with n random features selected from the given table.\n \"\"\"\n features_number = len(data.domain) - 1\n if n >= features_number:\n return (data, test_data)\n indices = range(features_number)\n for i in xrange(features_number - n):\n del indices[random_generator(len(indices))]\n sel = indices + [features_number]\n return (data.select(sel), test_data.select(sel))\n\ndef select_features_proportion(data, test_data, p,\n random_generator=Orange.misc.Random(0)):\n \"\"\"\n Returns new data table with n random features selected, where\n n = len(data) * p.\n \"\"\"\n return select_random_features(data, test_data,\n int(math.ceil(len(data.domain) * p)), random_generator)\n\ndef split_dataset(data, p):\n \"\"\"\n Splits the data table according to the given proportion.\n \"\"\"\n l = len(data)\n t1 = data.get_items_ref(range(int(math.floor(p*l))))\n t2 = data.get_items_ref(range(int(math.ceil(p*l)), l))\n return (t1, t2)\n\n#def build_set_list_desc_similarity(data, set_size, metric=hamming,\n# rand=Orange.misc.Random(0)):\n# \"\"\"\n# Builds a list of subsets of data in which each consecutive subset is less\n# similar to the first one (uses utils.similarity.datasets_distance). Each\n# subset is of size S = set_size * len(data).\n# \"\"\"\n# def distance_to_s0(x):\n# return instance_dataset_distance(x, s0, metric)\n# s0, _ = split_dataset(data, set_size)\n# asc_list = sorted([(distance_to_s0(i), i) for i in data])\n# sets = [s0]\n# s_dists = [(0, i) for i in xrange(len(s0))]\n# for i in xrange(len(s0), len(asc_list)):\n# s = sets[-1].get_items(range(len(sets[-1])))\n# idx = heapq.heappop(s_dists)[1]\n# s[idx] = asc_list[i][1]\n# heapq.heappush(s_dists, (asc_list[i][0], idx))\n# sets.append(s)\n# return sets\n#\n#\n#def benchmark_generalization(data, rand):\n# # Levels: 1. Test data distance (2. Samples, 3. Learner)\n# levels = 1\n# results = {}\n# sets = build_set_list_desc_similarity(data, GENERALIZATION_PROPORTION,\n# METRIC, rand)\n# step = int(math.ceil(float(len(sets)) / GENERALIZED_SETS))\n# if step == 0:\n# fsets = sets\n# else:\n# fsets = [sets[j] for j in xrange(0,len(sets),step)]\n# if fsets[-1] != sets[-1]:\n# fsets.append(sets[-1])\n# dists = map(lambda s: datasets_distance(fsets[0], s, euclidean), fsets)\n# classifiers = map(lambda l: l(fsets[0]), LEARNERS)\n# for j in xrange(len(fsets)):\n# if not dists[j] in results:\n# results[dists[j]] = {}\n# results[dists[j]][0] = test_on_data(classifiers, fsets[j])\n# return (levels, results)\n#\n#def benchmark_data_subsets_dec_dist(data, rand):\n# # Levels: 1. Learn subset, (2. Samples, 3. 
Learner)\n# levels = 1\n# results = {}\n#\n# sets, dists = build_subsets_dec_dist(data)\n#\n# step = int(math.ceil(float(len(sets)) / GENERALIZED_SETS))\n# if step == 0:\n# fsets = sets\n# fdists = dists\n# else:\n# fsets = [sets[j] for j in xrange(0,len(sets),step)]\n# fdists = [dists[j] for j in xrange(0,len(sets),step)]\n# if fsets[-1] != sets[-1]:\n# fsets.append(sets[-1])\n# fdists.append(dists[-1])\n#\n# for i in xrange(len(fsets)):\n# if not fdists[i] in results:\n# results[fdists[i]] = {}\n# results[fdists[i]][0] = learn_and_test_on_test_data(LEARNERS, data.select(fsets[i], 1), data)\n# return (levels, results)\n#\n#def benchmark_data_subsets(data, rand):\n# # Levels: 1. Learn subset (2. Samples, 3. Learner)\n# \n# levels = 1\n# results = {}\n# ind = indices_gen(LEARNING_PROPORTION, rand, data)\n# learn_data = data.select(ind, 0)\n# test_data = data.select(ind, 1)\n# dlen = len(learn_data)\n# # Increasing subsets by single instances\n# for sn in xrange(1, int(LEARN_SUBSETS[0] * dlen)):\n# results[sn] = {}\n# for i in xrange(SAMPLE_SIZE):\n# sn_ldata = learn_data.select(indices_gen(sn, rand, learn_data), 0)\n# results[sn][i] = learn_and_test_on_test_data(LEARNERS,\n# sn_ldata, test_data)\n# # Increasing subsets by proportions\n# for sp in LEARN_SUBSETS:\n# sn = int(sp * dlen)\n# results[sn] = {}\n# for i in xrange(SAMPLE_SIZE):\n# sn_ldata = learn_data.select(indices_gen(sn, rand, learn_data), 0)\n# results[sn][i] = learn_and_test_on_test_data(LEARNERS,\n# sn_ldata, test_data)\n# return (levels, results)\n\ndef benchmark_data_subsets_hellinger(data, rand, conv):\n # Levels: 1. Learn subset distance (2. Samples, 3. Learner)\n \n levels = 1\n results = {}\n dlen = len(data)\n \n level = 5\n l_domain = len(data.domain)\n\n class_vals = tuple(float(i) for i in xrange(len(data.domain.class_var.values)))\n\n if level > l_domain:\n level = l_domain\n\n #n_combinations = sum(factorial(l_domain)/factorial(l)/factorial(l_domain-l)\n # for l in xrange(1, level+1))*len(class_vals)\n n_combinations = sum(factorial(l_domain)/factorial(l)/factorial(l_domain-l)\n for l in xrange(1, level+1))\n\n ddata = Orange.data.discretization.DiscretizeTable(data,\n method=Orange.feature.discretization.EqualWidth(n=len(data)/10))\n ddata = np.array([tuple(float(d[i]) for i in xrange(len(ddata.domain))) for d in ddata])\n #data = np.array([tuple(float(d[i]) for i in xrange(len(data.domain))) for d in data])\n ddata_distr = JointDistributions(ddata)\n\n\n dd_sq_vals = combined_distribution(ddata_distr, level, ddata)\n dd_sq_vals /= len(ddata)\n dd_sq_vals = np.sqrt(dd_sq_vals)\n\n mean_dist = None\n\n def get_current_CA(classifier):\n if mean_dist:\n return results[mean_dist][classifier.name]['CA'][0]\n else:\n return 1.0\n\n for sp in HDISTANCES:\n sn = conv.subset_size(sp)\n sample_results = {}\n dists = []\n for i in xrange(SAMPLE_SIZE):\n ind = random.sample(xrange(len(data)), int(sn))\n sn_data = data.get_items(ind)\n sn_ddata = ddata[ind]\n \n # Calculating Hellinger distance.\n sn_ddata_distr = JointDistributions(sn_ddata)\n sd_vals = combined_distribution(sn_ddata_distr, level, ddata)\n sd_vals /= sn\n r = np.sqrt(sd_vals) - dd_sq_vals\n dist = np.sqrt(np.sum(np.multiply(r,r))/2/n_combinations)\n dists.append(dist)\n\n #print fano_min_error(sn_ddata_distr, len(ddata.domain.class_var.values))\n \n classifiers = [l(sn_data) for l in LEARNERS]\n for j in xrange(len(LEARNERS)):\n classifiers[j].name = LEARNERS[j].name\n\n majority_vote = MajorityVoteClassifier(list(classifiers), name=\"vote\")\n wcs = 
WeightedConfidenceSharingClassifier(list(classifiers), name=\"wcs\")\n current_best = BestDecidesClassifier(list(classifiers), get_current_CA,\n name=\"current_best\")\n classifiers.append(majority_vote)\n classifiers.append(wcs)\n classifiers.append(current_best)\n\n CAs = Orange.evaluation.scoring.CA(test_on_data(classifiers, data))\n sample_results[i] = {}\n for idx, classifier in enumerate(LEARNERS_NAMES):\n sample_results[i][classifier] = {}\n sample_results[i][classifier]['CA'] = CAs[idx]\n \n mean_results = evaluate.dict_recur_mean_err(sample_results.values())\n mean_dist = float(sum(dists))/SAMPLE_SIZE\n results[mean_dist] = mean_results\n\n return (levels, results)\n\nif __name__ == '__main__':\n\n if len(sys.argv) > 1:\n data_file = sys.argv[1]\n else:\n data_file = \"iris\"\n\n data = Orange.data.Table(data_file)\n\n conv = sdistances.HDistanceConverter(\"subset_plots/{0}.tab\".format(data_file))\n\n #levels, results = benchmark_data_subsets(data, rand)\n #levels, results = benchmark_generalization(data, rand)\n #levels, results = benchmark_data_subsets_dec_dist(data, rand)\n levels, results = benchmark_data_subsets_hellinger(data, rand, conv)\n\n data_path = \"{0}_results.pkl\".format(data_file)\n data_file = open(data_path, \"wb\")\n cPickle.dump(LEARNERS_NAMES, data_file)\n cPickle.dump(results, data_file)\n data_file.close()\n","repo_name":"zubekj/meta-learning-evaluation","sub_path":"src/experiments_real_data/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":10739,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"15"} +{"seq_id":"31819496318","text":"\"\"\"Working with incomes: adding and deleting them, statistics.\"\"\"\r\nimport datetime\r\nimport re\r\nimport pytz\r\nfrom typing import List, NamedTuple, Optional\r\n\r\n\r\nimport exceptions\r\nfrom database import DataBase\r\nfrom categories import SubCategories\r\n\r\n\r\nclass Message(NamedTuple):\r\n \"\"\"Structure of a parsed message about a new income.\"\"\"\r\n amount: int\r\n sub_category_text: str\r\n\r\n\r\nclass Income(NamedTuple):\r\n \"\"\"Structure of a new income added to the DB.\"\"\"\r\n id: Optional[int]\r\n amount: int\r\n sub_category_name: str\r\n\r\n\r\ndef add_income(raw_message: str) -> Income:\r\n \"\"\"Adds a new income.\r\n Takes as input the text of a message that arrived in the bot.\"\"\"\r\n\r\n parsed_message = _parse_message(raw_message)\r\n sub_category = SubCategories().get_sub_category(\r\n parsed_message.sub_category_text)\r\n db = DataBase()\r\n db.insert(\"transactions\", {\r\n \"amount\": parsed_message.amount,\r\n \"created\": _get_now_formatted(),\r\n \"sub_category\": sub_category.codename,\r\n \"raw_text\": raw_message\r\n })\r\n return Income(id=None,\r\n amount=parsed_message.amount,\r\n sub_category_name=sub_category.name)\r\n\r\n\r\ndef get_today_income_statistics() -> str:\r\n \"\"\"Returns today's income statistics as a string.\"\"\"\r\n\r\n db = DataBase()\r\n cursor = db.get_cursor()\r\n # All incomes for the day\r\n cursor.execute(\"select sum(amount) \"\r\n \"from transactions where date(created)=date('now', 'localtime') \"\r\n \"and sub_category in (select codename \"\r\n \"from sub_categories where category='income') \")\r\n result = cursor.fetchone()\r\n if not result[0]:\r\n return \"No incomes yet today\"\r\n today_incomes = result[0]\r\n return (f\"Incomes today:\\n\"\r\n f\"{today_incomes} rub. out of {_get_budget_limit()} rub.\\n\\n\"\r\n f\"For the current month: /month_incomes\")\r\n\r\n\r\ndef get_month_income_statistics() -> str:\r\n \"\"\"Returns the income statistics for the current month as a string.\"\"\"\r\n\r\n now = _get_now_datetime()\r\n first_day_of_month = f'{now.year:04d}-{now.month:02d}-01'\r\n db = DataBase()\r\n cursor = db.get_cursor()\r\n cursor.execute(\"select sum(amount) \"\r\n f\"from transactions where date(created) >= '{first_day_of_month}' \"\r\n \"and sub_category in (select codename \"\r\n \"from sub_categories where category='income') \")\r\n result = cursor.fetchone()\r\n if not result[0]:\r\n return \"No incomes yet this month\"\r\n month_incomes = result[0]\r\n return (f\"Incomes in the current month:\\n\"\r\n f\"{month_incomes} rub. out of {_get_budget_limit()} rub.\\n\\n\"\r\n f\"For today: /today_incomes\")\r\n\r\n\r\ndef last_income() -> List[Income]:\r\n \"\"\"Returns the last few incomes.\"\"\"\r\n\r\n db = DataBase()\r\n cursor = db.get_cursor()\r\n cursor.execute(\r\n \"select t.id, t.amount, s.name \"\r\n \"from transactions t left join sub_categories s \"\r\n \"on s.codename=t.sub_category \"\r\n \"where s.category='income' \"\r\n \"order by created desc limit 5\")\r\n rows = cursor.fetchall()\r\n last_incomes = [Income(id=row[0], amount=row[1], sub_category_name=row[2]) for row in rows]\r\n return last_incomes\r\n\r\n\r\ndef delete_income(row_id: int) -> None:\r\n \"\"\"Deletes an income by its identifier.\"\"\"\r\n\r\n db = DataBase()\r\n db.delete(\"transactions\", row_id)
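\r\n\r\n\r\n# Illustrative note (added): _parse_message below expects \"<label> <amount> <subcategory>\",\r\n# e.g. _parse_message(\"income 20000 salary\") -> Message(amount=20000, sub_category_text=\"salary\")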
\r\ndef _parse_message(raw_message: str) -> Message:\r\n \"\"\"Parses the text of an incoming message about a new income.\"\"\"\r\n\r\n regexp_result = re.match(r\"(.*) ([\\d ]+) (.*)\", raw_message)\r\n if not regexp_result or not regexp_result.group(0) \\\r\n or not regexp_result.group(1) or not regexp_result.group(2) or not regexp_result.group(3):\r\n raise exceptions.NotCorrectMessage(\r\n \"Can't understand the message. Write a message in a format \"\r\n \"like:\nincome 20000 salary\")\r\n amount = int(regexp_result.group(2).replace(\" \", \"\"))\r\n sub_category_text = regexp_result.group(3).strip().lower()\r\n return Message(amount=amount, sub_category_text=sub_category_text)\r\n\r\n\r\ndef _get_now_formatted() -> str:\r\n \"\"\"Returns today's date as a string\"\"\"\r\n return _get_now_datetime().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n\r\n\r\ndef _get_now_datetime() -> datetime.datetime:\r\n \"\"\"Returns the current datetime in the Moscow time zone.\"\"\"\r\n tz = pytz.timezone(\"Europe/Moscow\")\r\n now = datetime.datetime.now(tz)\r\n return now\r\n\r\n\r\ndef _get_budget_limit() -> int:\r\n \"\"\"Returns the monthly spending limit for the main basic expenses\"\"\"\r\n \r\n # TODO: Replace the data passing with a dictionary of limits\r\n db = DataBase()\r\n return db.fetchall(\"budgets\", [\"month_limit\"])[5][\"month_limit\"]\r\n","repo_name":"ADDrey/telegram-bot-in-docker","sub_path":"incomes.py","file_name":"incomes.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"31727834566","text":"\"\"\"\nDeep policy network implemented by Junxiao Song\nOriginal repo: https://github.com/junxiaosong/AlphaZero_Gomoku.git\n\"\"\"\nfrom __future__ import print_function\nimport tensorflow as tf\nimport keras\nimport numpy as np\nimport random\n\nimport pickle as pickle\nfrom collections import defaultdict, deque\nfrom env.board import Board, Game\nfrom agent.mcts import MCTSPlayer\nfrom agent.pure_mcts import MCTSPlayer as Pure_MCTS\n\nclass PolicyValueNet():\n \"\"\"\n policy value net\n \"\"\"\n\n #if you want to play against the AI, set \"is_self_play = 0\"\n def __init__(self, board_width, board_height, net_params_path = None, is_self_play = 1):\n\n # init network parameters\n self.learning_rate = 5e-3\n self.l2_const = 1e-4 #coef of l2 penalty\n self.lr_multiplier = 1.0\n self.temp = 1.0 #temperature parameter\n self.n_playout = 400 # number of simulations for each move\n self.c_puct = 5\n self.buffer_size = 10000\n self.batch_size = 512 # mini-batch size \n self.data_buffer = deque(maxlen=self.buffer_size)\n self.play_batch_size = 1\n self.epochs = 5 #number of train steps for each update\n self.kl_targ = 0.025\n self.check_freq = 50\n self.game_batch_num = 1500\n self.best_win_ratio = 0.0\n self.pure_mcts_playout_num = 1000\n \n # initial env\n self.board = Board()\n self.game = Game(self.board)\n self.board.init_board()\n self.board_width = board_width\n self.board_height = board_height\n\n self.create_policy_value_net()\n self._loss_train_op()\n\n if net_params_path:\n self.model.load_weights(net_params_path)\n \n #init mcts player\n self.mcts_player = MCTSPlayer(self.policy_value_fn, c_puct = self.c_puct, n_playout = self.n_playout, is_selfplay = is_self_play)\n\n\n def create_policy_value_net(self):\n self.state_input = keras.Input(shape=(4, self.board_width, self.board_height))\n self.winner = keras.Input(shape = (1, ))\n self.mcts_probs = keras.Input(shape = (64, ))\n\n # keras conv layers\n conv1 = keras.layers.convolutional.Conv2D(filters = 32, kernel_size = (3, 3), padding='same', activation='relu')(self.state_input)\n conv2 = keras.layers.convolutional.Conv2D(filters = 64, kernel_size = (3, 3), padding='same', activation='relu')(conv1)\n conv3 = keras.layers.convolutional.Conv2D(filters = 128, kernel_size = (3, 3), padding='same', activation='relu')(conv2)\n\n #regularization terms\n l2_penalty = 
keras.regularizers.l2(self.l2_const)\n\n #keras policy network\n policy_net1 = keras.layers.convolutional.Conv2D(filters = 4, kernel_size = [1, 1], activation='relu')(conv3)\n policy_net2 = keras.layers.Flatten()(policy_net1)\n self.policy_net = keras.layers.Dense(units = self.board_width * self.board_height, activation='softmax', activity_regularizer=l2_penalty)(policy_net2)\n\n #keras state value layers\n value_layer1 = keras.layers.convolutional.Conv2D(filters = 2, kernel_size = [1, 1], activation='relu')(conv3)\n value_layer2 = keras.layers.Flatten()(value_layer1)\n value_layer3 = keras.layers.Dense(units = self.board_width*self.board_height, activation='relu')(value_layer2)\n self.value_net = keras.layers.Dense(units = 1, activation='tanh', activity_regularizer=l2_penalty)(value_layer3)\n\n self.model = keras.engine.training.Model(inputs = self.state_input, outputs = [self.policy_net, self.value_net])\n\n\n def _loss_train_op(self):\n #There are three loss terms:\n #loss = (z - v)^2 - pi^T * log(p) + c||theta||^2\n \n losses = [loss_function_for_policy, loss_function_for_value]\n optimizer = keras.optimizers.Adam(lr=self.learning_rate * self.lr_multiplier)\n self.model.compile(optimizer=optimizer, loss=losses)\n\n def policy_value(self, state_input):\n \"\"\"\n This function predicts the action probabilities and the state value\n input: state\n output: action probability and value\n \"\"\"\n\n act_probs, value = self.model.predict(state_input)\n return act_probs, value\n\n def policy_value_fn(self, board):\n \"\"\"\n input: board\n output: a list of (action, probability) tuples for each available action and the score of the board state\n \"\"\"\n legal_position = board.get_avalible_move()\n current_state = board.get_current_state()\n act_probs, value = self.policy_value(current_state.reshape(-1, 4, self.board_width, self.board_height))\n act_probs = zip(legal_position, act_probs.flatten()[legal_position])\n return act_probs, value[0][0]\n \n def get_extend_data(self, play_data):\n \"\"\"\n augment the data by rotation and flipping\n play_data:[(state, mcts_prob, winner_z),..., ..., ...]\n \"\"\"\n extend_data = []\n for state, mcts_prob, winner in play_data:\n for i in [1, 2, 3, 4]:\n # rotation\n equi_state = np.array([np.rot90(s ,i) for s in state])\n equi_mcts_prob = np.rot90(np.flipud(mcts_prob.reshape(self.board_height,self.board_width)),i)\n extend_data.append((equi_state, np.flipud(equi_mcts_prob).flatten(), winner))\n #flip horizontally\n equi_state = np.array([np.fliplr(s) for s in equi_state])\n equi_mcts_prob = np.fliplr(equi_mcts_prob)\n extend_data.append((equi_state, np.flipud(equi_mcts_prob).flatten(), winner))\n return extend_data\n\n def collect_selfplay_data(self, n_games = 1):\n \"\"\"\n collect self-play data for training\n \"\"\"\n for i in range(n_games):\n winner, play_data = self.game.start_self_play(self.mcts_player, temp=self.temp)\n self.episode_len = len(play_data)\n #augment the data\n play_data = self.get_extend_data(play_data)\n self.data_buffer.extend(play_data)\n \n #TODO: two model aggregation\n def policy_update(self):\n mini_batch = random.sample(self.data_buffer, self.batch_size)\n\n state_batch = np.array([data[0] for data in mini_batch]).reshape(-1, 4, 8, 8)\n mcts_probs_batch = np.array([data[1] for data in mini_batch])\n winner_batch = np.array([data[2] for data in mini_batch])\n old_probs, old_v = self.policy_value(state_batch)\n\n\n self.winner = winner_batch\n self.mcts_probs = mcts_probs_batch\n\n self._loss_train_op()\n\n for i in range(self.epochs):\n 
history = self.model.fit(state_batch, [mcts_probs_batch, winner_batch], epochs=self.epochs)\n loss = history.history['loss']\n new_probs, new_v = self.policy_value(state_batch)\n kl = np.mean(np.sum(old_probs * (np.log(old_probs + 1e-10) - np.log(new_probs + 1e-10)),axis=1))\n if kl > self.kl_targ * 4:\n break \n if kl > self.kl_targ * 2 and self.lr_multiplier > 0.1:\n self.lr_multiplier /= 1.5\n elif kl < self.kl_targ / 2 and self.lr_multiplier < 10:\n self.lr_multiplier *= 1.5\n\n explained_var_old = 1 - np.var(np.array(winner_batch) - old_v.flatten())/np.var(np.array(winner_batch))\n explained_var_new = 1 - np.var(np.array(winner_batch) - new_v.flatten())/np.var(np.array(winner_batch)) \n\n print(\"kl:{:.5f},lr_multiplier:{:.3f},loss:{},explained_var_old:{:.3f},explained_var_new:{:.3f}\".format(\n kl, self.lr_multiplier, loss, explained_var_old, explained_var_new))\n\n return loss\n\n\n def policy_evaluate(self, n_games = 10):\n \"\"\"\n Evaluate the trained policy by playing games against thr pure MCTS player\n Only for monitoring the progress of training\n \"\"\"\n current_mcts_player = MCTSPlayer(self.policy_value_fn,c_puct = self.c_puct, n_playout = self.n_playout)\n pure_mcts_player = Pure_MCTS(c_puct=5, n_playout=self.pure_mcts_playout_num)\n win_count = defaultdict(int)\n for i in range(n_games):\n winner = self.game.start_play(current_mcts_player, pure_mcts_player)\n win_count[winner] += 1\n win_ratio = 1.0 * (win_count[1] + 0.5*win_count[0])/n_games\n print(\"num_playout: {}, win: {}, loss:{}, tie: {}\".format(self.pure_mcts_playout_num, win_count[1], win_count [-1], win_count[0]))\n return win_ratio\n \n def get_policy_param(self):\n \"\"\"\n return the parameters of both policy and value network\n \"\"\"\n policy_net_parameters = []\n for layer in self.policy_net_model.layers:\n policy_net_parameters.append(layer.get_weights())\n \n value_net_parameters = []\n for layer in self.value_net_model.layers:\n value_net_parameters.append(layer.get_weights())\n \n return [policy_net_parameters, value_net_parameters]\n\n def run(self):\n \"\"\"\n run the training pipline\n \"\"\"\n try:\n for i in range(self.game_batch_num):\n self.collect_selfplay_data(self.play_batch_size)\n print(\"batch i:{}, episode_len:{}\".format(i+1, self.episode_len))\n if len(self.data_buffer) > self.batch_size:\n loss = self.policy_update()\n #check the performance of the current model, and save the model parameters\n if (i+1) % self.check_freq == 0:\n print(\"current self-play batch: {}\".format(i+1))\n win_ratio = self.policy_evaluate()\n # net_params = self.get_policy_param() #get model parameters\n # pickle.dump(net_params, open('./model/current_policy.model','wb'),pickle.HIGHEST_PROTOCOL) # save model parameters to file\n self.model.save(\"./model/current_model.h5\")\n if win_ratio > self.best_win_ratio:\n print(\"New best policy get!\")\n self.best_win_ratio = win_ratio\n # pickle.dump(net_params, open('./model/best_policy.model','wb'), pickle.HIGHEST_PROTOCOL) # update the best policy\n self.model.save(\"./model/best_policy_model.h5\")\n if self.best_win_ratio == 1.0 and self.pure_mcts_playout_num < 5000:\n self.pure_mcts_playout_num += 1000\n self.best_win_ratio = 0.0\n except KeyboardInterrupt:\n print('\\n\\rquit')\n\ndef loss_function_for_policy(y_true, y_pred):\n return keras.losses.categorical_crossentropy(y_true, y_pred)\n\ndef loss_function_for_value(y_true, y_pred):\n return keras.losses.mean_squared_error(y_true, y_pred)\n \nif __name__ == '__main__':\n training_pipeline = PolicyValueNet(8, 
8, './model/current_model.h5')\n training_pipeline.run()\n","repo_name":"remmusSummer/Omega-Reversi","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10910,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"15"} +{"seq_id":"28439293517","text":"import pigpio\nfrom time import sleep\n\n#initializiation code\npi = pigpio.pi()\n\nif not pi.connected:\n exit(0)\n\npin = 4\npi.set_mode(pin, pigpio.OUTPUT)\n\nwhile True:\n try:\n #pin number : int , state : bool \n pi.write(pin, False)\n print(\"turned on\")\n sleep(1)\n pi.write(pin, True)\n print(\"turned off\")\n sleep(1)\n except KeyboardInterrupt:\n print(\"Manually Exited.\")\n break\n\n\nprint(\"I'm done\")\n","repo_name":"hoang-danny05/raspberrypi","sub_path":"Thermocouple/new_gpio.py","file_name":"new_gpio.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"37386642214","text":"#----------* CHALLENGE 56 *----------\r\n# Randomly pick a whole number between 1 and 10. Ask the user to enter a number and \r\n# keep entering numbers until they enter the number that was randomly picked.\r\n\r\nimport random\r\n\r\nnumRandom = random.randint(1,10)\r\nnum = int(input(\"Enter a number: \"))\r\n\r\nwhile num != numRandom:\r\n num = int(input(\"Enter a number again: \"))\r\n\r\nprint(\"Correct!\")","repo_name":"lilimonroy/randomChallenges052-059","sub_path":"Challenge056.py","file_name":"Challenge056.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"33300610409","text":"import torch.nn as nn\nimport torch\nimport copy\nimport torch.nn.init as init\n\nclass RF_VAE2(nn.Module):\n \"\"\"Encoder and Decoder architecture for 3D Shapes, Celeba, Chairs data.\n Taken entirely from github.com/ThomasMrY/RF-VAE\"\"\"\n def __init__(self, z_dim=10):\n super(RF_VAE2, self).__init__()\n self.z_dim = z_dim\n self.encode = nn.Sequential(\n nn.Conv2d(3, 32, 4, 2, 1),\n nn.ReLU(True),\n nn.Conv2d(32, 32, 4, 2, 1),\n nn.ReLU(True),\n nn.Conv2d(32, 64, 4, 2, 1),\n nn.ReLU(True),\n nn.Conv2d(64, 64, 4, 2, 1),\n nn.ReLU(True),\n nn.Conv2d(64, 256, 4, 1),\n nn.ReLU(True),\n nn.Conv2d(256, 2*z_dim, 1)\n )\n self.decode = nn.Sequential(\n nn.Conv2d(z_dim, 256, 1),\n nn.ReLU(True),\n nn.ConvTranspose2d(256, 64, 4),\n nn.ReLU(True),\n nn.ConvTranspose2d(64, 64, 4, 2, 1),\n nn.ReLU(True),\n nn.ConvTranspose2d(64, 32, 4, 2, 1),\n nn.ReLU(True),\n nn.ConvTranspose2d(32, 32, 4, 2, 1),\n nn.ReLU(True),\n nn.ConvTranspose2d(32, 3, 4, 2, 1),\n )\n self.weight_init()\n\n def weight_init(self, mode='normal'):\n if mode == 'kaiming':\n initializer = kaiming_init\n elif mode == 'normal':\n initializer = normal_init\n\n for block in self._modules:\n for m in self._modules[block]:\n initializer(m)\n\n def reparametrize(self, mu, logvar):\n std = logvar.mul(0.5).exp_()\n eps = std.data.new(std.size()).normal_()\n return eps.mul(std).add_(mu)\n\n def forward(self, x, no_dec=False):\n stats = self.encode(x)\n mu = stats[:, :self.z_dim]\n logvar = stats[:, self.z_dim:]\n z = self.reparametrize(mu, logvar)\n\n if no_dec:\n return z.squeeze()\n else:\n x_recon = self.decode(z)\n return x_recon, mu, logvar, z.squeeze()\n\ndef kaiming_init(m):\n if isinstance(m, (nn.Linear, nn.Conv2d)):\n init.kaiming_normal_(m.weight)\n if m.bias is not None:\n m.bias.data.fill_(0)\n elif isinstance(m, (nn.BatchNorm1d, 
nn.BatchNorm2d)):\n m.weight.data.fill_(1)\n if m.bias is not None:\n m.bias.data.fill_(0)\n\ndef normal_init(m):\n if isinstance(m, (nn.Linear, nn.Conv2d)):\n init.normal_(m.weight, 0, 0.02)\n if m.bias is not None:\n m.bias.data.fill_(0)\n elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):\n m.weight.data.fill_(1)\n if m.bias is not None:\n m.bias.data.fill_(0)\n\ndef loadEncoder(rfvae_name, rfvae_dims):\n print(\"Loading encoder...\")\n rfvae = RF_VAE2(rfvae_dims)\n checkpoint = torch.load(rfvae_name)\n rfvae.load_state_dict(checkpoint['model_states']['VAE'])\n # remove decoder:\n encoder = rfvae.encode\n return encoder\n\nclass NeuralNetwork(nn.Module):\n def __init__(self, encoder):\n super(NeuralNetwork, self).__init__()\n self.z_dim = 10\n self.encoder = copy.deepcopy(encoder)\n # freeze encoder layers:\n i = 0\n for layer in self.encoder:\n if i < 8:\n layer.trainable = False\n i += 1\n for name, param in self.encoder.named_parameters():\n if param.requires_grad and '8' not in name and '10' not in name:\n param.requires_grad = False\n \n self.fully_connected = nn.Sequential(\n nn.Linear(10, 7),\n nn.ReLU(),\n nn.Linear(7, 4),\n nn.ReLU(),\n nn.Linear(4, 1))\n \n def reparametrize(self, mu, logvar):\n std = logvar.mul(0.5).exp_()\n eps = std.data.new(std.size()).normal_()\n return eps.mul(std).add_(mu)\n \n def forward(self, x):\n stats = self.encoder(x)\n \n mu = stats[:, :self.z_dim]\n logvar = stats[:, self.z_dim:]\n z = self.reparametrize(mu, logvar).squeeze()\n logits = self.fully_connected(z)\n return logits\n\nclass NeuralNetworkDemographics(nn.Module):\n def __init__(self, encoder):\n super(NeuralNetworkDemographics, self).__init__()\n self.z_dim = 10\n self.encoder = copy.deepcopy(encoder)\n # freeze encoder layers:\n i = 0\n for layer in self.encoder:\n if i < 8:\n layer.trainable = False\n i += 1\n for name, param in self.encoder.named_parameters():\n if param.requires_grad and '8' not in name and '10' not in name:\n param.requires_grad = False\n \n self.fully_connected = nn.Sequential(\n nn.Linear(14, 512),\n nn.ReLU(),\n nn.Linear(512, 256),\n nn.ReLU(),\n nn.Linear(256, 128),\n nn.ReLU(),\n nn.Linear(128, 1))\n \n def reparametrize(self, mu, logvar):\n std = logvar.mul(0.5).exp_()\n eps = std.data.new(std.size()).normal_()\n return eps.mul(std).add_(mu)\n \n def forward(self, x, demo):\n stats = self.encoder(x)\n mu = stats[:, :self.z_dim]\n logvar = stats[:, self.z_dim:]\n z = self.reparametrize(mu, logvar).squeeze()\n final = torch.cat((z, demo), dim=1)\n logits = self.fully_connected(final)\n return logits\n\n","repo_name":"iheallab/Clock-Drawing-Classification-With-RF_VAE","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5432,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"73136769932","text":"\"\"\"\n| The core module of Face Recognition App\n| You can run app from command line by typing\n| python main.py -e path_to_encoded_faces.pickle -d detect_method\n| detect_method is one of 'hog' or 'cnn'\n\"\"\"\n\nimport argparse\nimport pickle\nfrom threading import Thread\nimport time\nfrom custom_functions.VideoStream import CameraAndFaceRecognizerCarrier\n\n\ndef main():\n \"\"\"| Inside main function CameraAndFaceRecognizerCarrier is started to maintain camera and face recognizer module\"\"\"\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-e\", \"--encodings\", required=True,\n help=\"path to serialized db of facial encodings\")\n ap.add_argument(\"-d\", 
\"--detect_method\", type=str, default=\"cnn\",\n help=\"face detection model to use: either `hog` or `cnn`\")\n args = vars(ap.parse_args())\n\n data = pickle.loads(open(args[\"encodings\"], \"rb\").read())\n print('Encodings loaded.')\n\n WebCam1_Lab1 = CameraAndFaceRecognizerCarrier.CameraAndFaceRecognizerCarrier(\n camName='WebCam1', location='Lab1', encodings=data, min_time=2.5,\n src=0, detect_method=args['detect_method'], scale=0.5)\n WebCam1_Lab1.start_carrier()\n\n # example how to checking access on camera's location\n\n# def check_access():\n# while not WebCam1_Lab1.recognizer.stopped:\n# # check access every 1 sec\n# time.sleep(1.0)\n# if WebCam1_Lab1.is_access_granted():\n# print(\"{} in {}: Access granted!\".format(WebCam1_Lab1.camName, WebCam1_Lab1.location))\n\n# access_check_thread = Thread(target=check_access)\n# access_check_thread.start()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Lukaszz99/FaceDetectionApp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"70789894731","text":"from flask import Flask, jsonify, make_response\n\nimport json\nimport time\nimport psutil\nimport subprocess\nimport xmltodict\n\nfrom os import geteuid, devnull\nfrom sys import exit\nfrom threading import Thread\nfrom pprint import pprint\n\napp = Flask(__name__)\n\nadapters = []\n\nFNULL = open(devnull, 'w')\n\ndef find_adapters(json_data, json_child_data):\n if type(json_child_data) is list:\n for item in json_child_data:\n find_adapters(json_data, item)\n elif type(json_child_data) is dict:\n for item, value in json_child_data.items():\n if item == 'node':\n find_adapters(json_data, value)\n elif item == 'description' and json_child_data['description'] == 'Wireless interface':\n businfo = json_child_data['businfo']\n interface = json_child_data['logicalname']\n add_adapter(json_data, businfo, interface)\n elif item == '@id' and json_child_data['@id'].startswith('usb'):\n businfo = json_child_data['businfo']\n update_adapter(businfo)\n \ndef purge_adapters():\n for adapter in adapters:\n now = time.time()\n if now - adapter['last_seen'] > 2.0:\n print(\"Removing %s\" % adapter)\n adapters.remove(adapter)\n\ndef add_adapter(json_data, businfo, interface):\n if type(json_data) is list:\n for item in json_data:\n add_adapter(item, businfo, interface)\n elif type(json_data) is dict:\n for item, value in json_data.items():\n if item == 'node':\n add_adapter(value, businfo, interface)\n elif item == '@id' and value.startswith('usb'):\n if json_data['businfo'] == businfo:\n name = json_data['vendor'] + ' ' + json_data['product']\n tag = 2000 + int(interface.replace('wlan', ''))\n\n found = update_adapter(businfo)\n \n if not found:\n try:\n adapter = {\n 'name': name,\n 'businfo': businfo,\n 'interface': interface,\n 'tag': tag,\n 'last_seen': time.time()\n }\n adapters.append(adapter)\n print(\"Adding %s\" % adapter)\n except OSError as e:\n print(\"Failed to start sensor for adapter %s: %s\" % (name, e))\n\ndef update_adapter(businfo):\n found = False\n for adapter in adapters:\n if adapter['businfo'] == businfo:\n adapter['last_seen'] = time.time()\n found = True\n\n return found\n\ndef run():\n while True:\n json_data = xmltodict.parse((subprocess.check_output([\"/usr/bin/lshw\", \"-xml\"], stderr=FNULL)).decode(\"ascii\"))\n json_data = json_data['list']\n find_adapters(json_data, json_data)\n purge_adapters()\n 
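# NOTE (added): purge_adapters() relies on running right after find_adapters()\n # has refreshed last_seen; with a 3 s poll and a 2.0 s purge window, an\n # adapter missing from one lshw scan is removed on the next pass.\n 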
time.sleep(3)\n\n@app.route('/ease/api/v1.0/adapters', methods=['GET'])\ndef get_adapters():\n return jsonify({'adapters': adapters})\n\n@app.errorhandler(404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\nif __name__ == '__main__':\n\n if geteuid() != 0:\n print(\"You need to have root privileges to run this script.\")\n exit(-1)\n\n thread = Thread(target = run)\n thread.daemon = True\n thread.start()\n app.run(host='0.0.0.0')\n\n","repo_name":"intuitibits/ease","sub_path":"ease.py","file_name":"ease.py","file_ext":"py","file_size_in_byte":3541,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"15"} +{"seq_id":"20141587526","text":"import sys\n\ninput = sys.stdin.readline\nR, C = map(int, input().split())\ndx = [-1, 0, +1, -1, +1, -1, 0, +1]\ndy = [-1, -1, -1, 0, 0, +1, +1, +1]\nground = []\n\n\ndef check(i, y, x):\n if (\n (x + dx[i] >= 0)\n and (x + dx[i] < C)\n and (y + dy[i] >= 0)\n and (y + dy[i] < R)\n and (ground[y + dy[i]][x + dx[i]] != \"*\")\n ):\n cntlst[y + dy[i]][x + dx[i]] += 1\n\n\nwhile R != 0 and C != 0:\n ground = []\n for i in range(R):\n ground.append(list(input().rstrip()))\n cntlst = [[0 for _ in range(C)] for k in range(R)]\n for i in range(R):\n for j in range(C):\n if ground[i][j] == \"*\":\n cntlst[i][j] = \"*\"\n for k in range(8):\n check(k, i, j)\n for i in range(R):\n for j in range(C):\n cntlst[i][j] = str(cntlst[i][j])\n for i in range(R):\n print(\"\".join(cntlst[i]))\n R, C = map(int, input().split())\n","repo_name":"Jeon-jisu/naver_AI_Tech_boostcamp","sub_path":"boj_practice/w20/4108.py","file_name":"4108.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"21571879444","text":"import numpy as np\nimport cv2\nimport torch\nfrom segment_anything import sam_model_registry, SamPredictor\n\ndef convert_mask_to_polygon(mask):\n contours = None\n if int(cv2.__version__.split('.')[0]) > 3:\n contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)[0]\n else:\n contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)[1]\n\n contours = max(contours, key=lambda arr: arr.size)\n if contours.shape.count(1):\n contours = np.squeeze(contours)\n if contours.size < 3 * 2:\n raise Exception('Less than three points have been detected. Cannot build a polygon.')\n\n polygon = []\n for point in contours:\n polygon.append([int(point[0]), int(point[1])])\n\n return polygon\n
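\n# Illustrative example (added; hypothetical values): a filled square mask\n#     mask = np.zeros((64, 64), np.uint8); mask[16:48, 16:48] = 255\n#     convert_mask_to_polygon(mask)  # -> roughly the square's corner points\n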
\nclass ModelHandler:\n def __init__(self):\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.sam_checkpoint = \"/opt/nuclio/sam/sam_vit_h_4b8939.pth\"\n self.model_type = \"vit_h\"\n self.latest_image = None\n sam_model = sam_model_registry[self.model_type](checkpoint=self.sam_checkpoint)\n sam_model.to(device=self.device)\n self.predictor = SamPredictor(sam_model)\n\n def handle(self, image):\n self.predictor.set_image(np.array(image))\n features = self.predictor.get_image_embedding()\n return features\n","repo_name":"opencv/cvat","sub_path":"serverless/pytorch/facebookresearch/sam/nuclio/model_handler.py","file_name":"model_handler.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":10432,"dataset":"github-code","pt":"15"} +{"seq_id":"72542744010","text":"import streamlit as st\nimport cv2\nimport numpy as np\n\nst.title(\"Real-time Object Detection with Webcam\")\n\n# Create a video capture object for the webcam\nvideo_capture = cv2.VideoCapture(0)\n\nframePlaceholder = st.empty()\n\n# Initialize trackbar values\nhue_low = 0\nhue_high = 179\nsaturation_low = 0\nsaturation_high = 255\nvalue_low = 0\nvalue_high = 255\n\n# Create a sidebar 
for trackbar controls\nst.sidebar.header(\"HSV Range Selection\")\nhue_low = st.sidebar.slider(\"Hue Low\", 0, 179, 0)\nhue_high = st.sidebar.slider(\"Hue High\", 0, 179, 179)\nsaturation_low = st.sidebar.slider(\"Saturation Low\", 0, 255, 0)\nsaturation_high = st.sidebar.slider(\"Saturation High\", 0, 255, 255)\nvalue_low = st.sidebar.slider(\"Value Low\", 0, 255, 0)\nvalue_high = st.sidebar.slider(\"Value High\", 0, 255, 255)\n\nwhile True:\n # Read a frame from the webcam\n ret, frame = video_capture.read()\n \n # Convert the frame to HSV color space\n hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n \n # Create a mask using the selected HSV range\n lower_range = np.array([hue_low, saturation_low, value_low])\n upper_range = np.array([hue_high, saturation_high, value_high])\n mask = cv2.inRange(hsv_frame, lower_range, upper_range)\n \n # Apply the mask to the original frame\n result = cv2.bitwise_and(frame, frame, mask=mask)\n \n # Display the original frame and the detected object\n # st.image([frame, result], caption=[\"Original Frame\", \"Detected Object\"], channels=\"BGR\")\n # frame = cv2.cvtColor(result, cv2.COLOR_BGR2RGB)\n\n framePlaceholder.image(result, channels=\"RGB\")\n","repo_name":"ZiaUrRehman-bit/Learn-Streamlit-for-Web-Application-Development","sub_path":"Example 13 (Detect Color from Webcam).py","file_name":"Example 13 (Detect Color from Webcam).py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"33684761147","text":"from django.db import models\nfrom pycountry import countries\nfrom django.utils.text import slugify\n# Create your models here.\n# define the model classes for this app \n# model classes pull out data from the database and present it to the user\n\n# ! 
on_delete signifies a special way of removing items\n\n# exception resolved by deleting db, makemigrations and migrate\n\nclass Author(models.Model):\n first_name = models.CharField(max_length=100)\n last_name = models.CharField(max_length=100)\n occupation = models.CharField(max_length=100)\n biography = models.TextField() \n slug = models.SlugField(unique = True, null = False, blank = True)\n joined = models.DateTimeField(auto_now_add = True)\n\n def __str__(self):\n return f'{self.first_name} {self.last_name}'\n \n class Meta:\n ordering = ['-joined']\n\n def save(self, *args, **kwargs):\n self.slug = slugify(f'{self.first_name} {self.last_name}')\n return super().save(*args, **kwargs)\n\n\nclass Region(models.Model):\n country_options = sorted([(country.name,country.name) for country in countries])\n country = models.CharField(max_length= 200, choices = country_options)\n town = models.CharField(max_length=100)\n added = models.DateTimeField(auto_now_add = True)\n slug = models.SlugField(unique = True, null = False, blank = True)\n\n def __str__(self):\n return f'{self.country}, {self.town}'\n\n class Meta:\n ordering = ['-added']\n \n def save(self, *args, **kwargs):\n self.slug = slugify(f'{self.country} {self.town}')\n return super().save(*args, **kwargs)\n\nclass Article(models.Model):\n title = models.CharField(max_length=200, unique = True)\n slug = models.SlugField(unique = True, null = False, blank = True)\n created_on = models.DateTimeField(auto_now_add = True)\n updated_on = models.DateTimeField(auto_now = True)\n author = models.ForeignKey(Author, on_delete= models.CASCADE)#, blank = True, null = True)\n regions = models.ManyToManyField(Region, related_name='regions')#blank = True) \n content = models.TextField()\n\n class Meta:\n ordering = ['-created_on']\n\n def __str__(self):\n return self.title\n \n def save(self, *args, **kwargs):\n self.slug = slugify(self.title)\n return super().save(*args, **kwargs)","repo_name":"Amundeep-Dhaliwal/articles_collection","sub_path":"playground/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"22072816532","text":"#\nimport argparse\nfrom pprint import pprint\nimport numpy as np\nfrom glob import glob\nimport pandas as pd\nimport os\nimport random\nimport numpy as np\nimport torch\nimport pytorch_lightning as pl\nfrom pytorch_lightning import seed_everything\nimport matplotlib.pyplot as plt\nimport torch.nn.functional as F\nfrom PIL import Image\n\nfrom dataset import *\nfrom model import *\nfrom evaluate import *\n\nfrom captum.attr import IntegratedGradients\nfrom captum.attr import GradientShap\nfrom captum.attr import Saliency\nfrom captum.attr import NoiseTunnel\nfrom captum.attr import visualization as viz\n\nfrom torch import nn\nfrom PIL import ImageDraw, ImageFont\nimport PIL\nfrom captum.attr import Occlusion\n\n\ndef mycropImage(img):\n #img = images[5][\"rtC\"]\n k = img[:,:,0]\n try:\n cut0 = np.where(np.std(k, axis = 1) < 10)[0][0]\n except:\n return img\n\n # is the top empty?\n if cut0 < 32:\n try:\n cut0 = np.where(np.std(k, axis = 1) < 10)[0]\n lidx = np.where((cut0[1:] - cut0[:-1]) > 2)[0][0]\n kimg = k.copy()*0\n kimg[0:k.shape[0]-cut0[lidx]] = k[cut0[lidx]:,:]\n k = kimg.copy()\n nimg = img.copy()*0\n nimg[0:k.shape[0]-cut0[lidx],:] = img[cut0[lidx]:,:,:]\n img = nimg.copy()\n except Exception as e:\n print(e)\n return img\n try:\n cut1 = np.where(np.std(k, axis = 1) < 10)[0][0]\n 
except Exception as e:\n return img\n\n cuth2 = (k.shape[0] - cut1)//2\n if cuth2 > 128:\n cut1 = k.shape[0] - 222\n cuth2 = 111\n\n k = img[0:cut1, cuth2:(k.shape[1]-cuth2), :]\n k = cv2.resize(k, (512, 512))\n return (k)\n\n\nif __name__ == '__main__':\n # get best params of best model = AMR\n tResults = pd.read_csv(\"./results/optuna.results.AMR.csv\")\n bestModel = tResults[tResults.value == np.min(tResults.value)].iloc[0]\n best_params = {k.replace(\"params_\", ''):bestModel[k] for k in bestModel.keys() if \"params_\" in k}\n for k in best_params:\n exec(k + \"=\" + repr(best_params[k]))\n LAMBDA_1 = 0.2 # no tuning\n LAMBDA_2 = LAMBDA_2/100\n gamma = gamma/10\n headSize = [2**nd1, 2**nd2, 2**nd3]\n\n dm = ScoutViewDataModule(returnLabel = True, data_path = \"./data\", image_path = \"./images\", imgSize = imgSize, returnAsDict = False)\n dm.setup(\"final\")\n\n lightning_model = TopoAge (learning_rate = lr, model = modelname, headSize = headSize,\n freeze = freeze, num_classes = NUM_CLASSES + 1, flatten = False, loss = \"AMR\",\n LAMBDA_1 = LAMBDA_1, LAMBDA_2 = LAMBDA_2, K = K,\n step_size = step_size, gamma = gamma)\n\n model_path = os.path.join(\"./checkpoints\", \"AMR\", \"final\", \"logs\", \"version_0\", \"checkpoints\", \"*\")\n model_path = glob(model_path)[0]\n model = lightning_model.load_from_checkpoint(model_path, learning_rate = lr, model = modelname, headSize = headSize,\n freeze = freeze, num_classes = NUM_CLASSES + 1, flatten = False, loss = \"AMR\",\n LAMBDA_1 = LAMBDA_1, LAMBDA_2 = LAMBDA_2, K = K,\n step_size = step_size, gamma = gamma)\n\n _ = model.cuda()\n _ = model.eval()\n\n # strange place, but trained already. print best model parameters size\n print(summary(model, input_size=(1, 3, imgSize, imgSize), verbose = 0))\n\n\n random.seed(1977)\n df = dm.test_df.copy()\n df = df.reset_index()\n\n images = []\n ageRange = list(np.arange(0.5,21,1.8))\n for a in ageRange:\n subdf = df.query(\"age >= @a\")\n subdf = subdf.sort_values([\"age\"])\n images.append({\"key\": subdf.iloc[0][\"index\"], \"age\": subdf.iloc[0][\"age\"]})\n\n\n for j, batch in enumerate(dm.test_dataloader(batch_size = 1)):\n ij = -1\n for l, k in enumerate(images):\n if k[\"key\"] == j:\n ij = l\n if ij == -1:\n continue\n\n y = batch[1].cuda()\n torch_img = batch[0].cuda()\n\n # its AMR\n occlusion = Occlusion(model)\n attributions_occ = occlusion.attribute(torch_img.cuda(),\n strides = (3, 8, 8),\n target=y,\n sliding_window_shapes=(3, 48, 48),\n baselines=0)\n\n transformed_img = (torch_img - torch.min(torch_img))/(torch.max(torch_img) - torch.min(torch_img))\n rt = np.transpose(transformed_img.squeeze().cpu().detach().numpy(), (1,2,0))\n s = (rt[:,:,0] + rt[:,:,1] + rt[:,:,2])/3.0\n rt[:,:,0] = s; rt[:,:,1] = s; rt[:,:,2] = s\n rt = rt/np.max(rt)*255\n rtA = np.asarray(rt, dtype = np.uint8)\n\n rt = np.transpose(attributions_occ.squeeze().cpu().detach().numpy(), (1,2,0))\n rtmax = np.max([np.abs(np.min(rt)), np.abs(np.max(rt))])\n rt = (rt + rtmax)/(2*rtmax)\n rt = rt*255\n\n rtB = np.asarray(rt, dtype = np.uint8)\n rtB = cv2.applyColorMap(rtB, cv2.COLORMAP_TWILIGHT_SHIFTED)\n\n rtC = 1.0*rtA + rtB*0.5\n rtC = rtC/(np.max(rtC))\n rtC = np.asarray(255*rtC, dtype = np.uint8)\n rImg = np.hstack([rtA,rtB,rtC])\n images[ij][\"pred\"] = y.cpu().numpy()\n images[ij][\"rtA\"] = rtA\n images[ij][\"rtB\"] = rtB\n images[ij][\"rtC\"] = rtC\n images[ij][\"rImg\"] = rImg\n\n sp = 16\n fx = 4; fy = 3\n finalImg = np.zeros((512*fy+sp*fy+sp, 512*fx+sp*fx+sp, 3), dtype = np.uint8)\n finalImg = 255 + 
finalImg\n\n MAE, testResults = getPreds (\"AMR\", \"test\")\n for j, k in enumerate(images):\n o1 = sp + (sp+512)*(j//fx)\n o2 = sp + (sp+512)*(j%fx)\n k = mycropImage(images[j][\"rtC\"])\n #k = images[j][\"rtC\"]\n age = np.round(testResults.iloc[images[j][\"key\"]][\"Age\"],1)\n pred = np.round(testResults.iloc[images[j][\"key\"]][\"test_preds\"],1)\n k[460:,:,:] = 0\n print (age, pred)\n k = addText (k, text=\"True: \"+str(age) + \"y, Prediction: \" + str(pred) +\"y\", org = (20, 465), fontSize = 36)\n finalImg[o1:o1+512,o2:o2+512,:] = k\n\n\n z = cv2.resize(finalImg, (0,0), fx=2.5, fy = 2.5)\n cv2.imwrite(\"./paper/Figure_8.png\", z)\n\n#\n","repo_name":"aydindemircioglu/scout.view.age","sub_path":"visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":6143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"30691162457","text":"from ._make import Maker\r\nfrom .methods import users, files, posts\r\nfrom typing import Optional\r\n\r\nclass Client:\r\n def __init__(self, auth: str) -> None:\r\n self.make = Maker(auth)\r\n self.Users = users.Users(maker=self.make)\r\n self.Files = files.Files(maker=self.make)\r\n self.Posts = posts.Posts(maker=self.make)\r\n\r\n async def getMyProfileInfo(self, profile_id: Optional[str] = None) -> dict:\r\n return await self.Users.getMyProfileInfo(profile_id=profile_id)\r\n\r\n async def requestUploadFile(self, file_name: str, file_size: int, file_type: str, profile_id: str) -> dict:\r\n return await self.Files.requestUploadFile(\r\n file_name=file_name,\r\n file_size=file_size,\r\n file_type=file_type,\r\n profile_id=profile_id,\r\n )\r\n\r\n async def uploadFile(self, upload_url: str, file: bytes, hash_file_request: str, file_id: str) -> str:\r\n return await self.Files.uploadFile(\r\n upload_url=upload_url,\r\n file=file,\r\n hash_file_request=hash_file_request,\r\n file_id=file_id,\r\n )\r\n \r\n async def addPost(self,\r\n post_type: str,\r\n profile_id: str,\r\n file_id: str,\r\n hash_file_receive: str,\r\n thumbnail_hash_file_receive: str,\r\n thumbnail_file_id: str,\r\n is_multi_file=False,\r\n height='200',\r\n width='200',\r\n caption=None\r\n ):\r\n return await self.Posts.addPost(\r\n post_type,\r\n profile_id,\r\n file_id,\r\n hash_file_receive,\r\n thumbnail_hash_file_receive,\r\n thumbnail_file_id,\r\n is_multi_file,\r\n height,\r\n width,\r\n caption,\r\n )","repo_name":"shayanheidari01/rubino","sub_path":"pyrino/_rubino.py","file_name":"_rubino.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"15"} +{"seq_id":"8839479891","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('meet', '0009_auto_20181004_1220'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='meet',\n name='share_image_url',\n field=models.CharField(default=b'', max_length=200, null=True, verbose_name='\\u5206\\u4eab\\u56fe\\u7247', blank=True),\n ),\n migrations.AddField(\n model_name='meet',\n name='share_path',\n field=models.CharField(default=b'', max_length=32, null=True, verbose_name='\\u5206\\u4eab\\u8def\\u5f84', blank=True),\n ),\n migrations.AddField(\n model_name='meet',\n name='share_title',\n field=models.CharField(default=b'', max_length=32, null=True, verbose_name='\\u5206\\u4eab\\u6807\\u9898', blank=True),\n ),\n migrations.AlterField(\n 
model_name='meet',\n name='latitude',\n field=models.FloatField(default=0, verbose_name='\\u7eac\\u5ea6'),\n ),\n migrations.AlterField(\n model_name='meet',\n name='longitude',\n field=models.FloatField(default=0, verbose_name='\\u7ecf\\u5ea6'),\n ),\n ]\n","repo_name":"bushitan/huaxun_server","sub_path":"meet/migrations/0010_auto_20181010_2223.py","file_name":"0010_auto_20181010_2223.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"20207430649","text":"import copy\ndef main():\n b = LoadFromFile(\"test.txt\")\n DebugPrint(b)\n print(BFS(b))\n #print(DFS(b))\n print(bidirectionalsearch(b))\n\n#arguments: file\n#returns: 2d tuple of board\ndef LoadFromFile(filepath):\n board = []\n n = 0\n with open(filepath, 'r') as file:\n count = 0\n for line in file:\n line1 = []\n if count == 0:\n n = file\n else:\n row = line.strip().split(\"\\t\")\n for element in row:\n if element == \"*\":\n line1.append(\"0\")\n else:\n line1.append(element)\n board.append(tuple(line1))\n count += 1\n return tuple(board)\n\n#arguments: 2d tuple of board\n#returns: nothing, prints out board\ndef DebugPrint(state):\n s = \"\"\n for element in state:\n for number in element:\n s += number + \" \"\n print(s)\n s = \"\"\n\n#arguments: 2d tuple of board\n#returns: 2d tuple of board\ndef FindHole(state):\n for i in range(len(state)):\n for j in range(len(state)):\n if state[i][j] == \"0\":\n return tuple([i, j])\n\n#arguments: 2d tuple of board, tuple pair of hole location, tuple pair being switched\n#returns: 2d tuple of board\ndef switch(state, hole, switched):\n newState = list(list(row) for row in copy.deepcopy(state))\n hole_row, hole_col = hole\n switch_row, switch_col = switched\n newState[hole_row][hole_col], newState[switch_row][switch_col] = newState[switch_row][switch_col], newState[hole_row][hole_col]\n return tuple(tuple(row) for row in newState)\n\n\n#arguments: 2d tuple of board\n#returns: 2d tuple of resulting neighbor moves, ((int, 1d list))\ndef ComputeNeighbors(state):\n neighbor = []\n hole = []\n hole = FindHole(state)\n row, col = hole\n if col + 1 <= len(state) -1:\n neighbor.append([state[row][col + 1], switch(state, hole, (row, col + 1))])\n if col - 1 >= 0:\n neighbor.append([state[row][col - 1], switch(state, hole, (row, col - 1))])\n if row + 1 <= len(state) -1:\n neighbor.append([state[row + 1][col], switch(state, hole, (row + 1, col))])\n if row - 1 >= 0:\n neighbor.append([state[row - 1][col], switch(state, hole, (row - 1, col))])\n \n return neighbor\n\n#arguments: 2d tuple of board\n#returns: return True/False, is the state the goal state\ndef IsGoal(state):\n position = 0\n n = len(state)\n for i in range(n):\n for j in range(n):\n if not int(state[i][j]) == position + 1:\n return False\n if i == n-1 and j == n-2:\n return True\n position += 1\n\n\n#arguments: 2d tuple of board\n#returns: 1d array of tile path to reach goal\ndef BFS(state):\n frontier = [(0, state)]\n discovered = set(state)\n parents = {(0, state): None}\n path = []\n while len(frontier) != 0:\n current_state = frontier.pop(0)\n discovered.add(current_state[1])\n if IsGoal(current_state[1]):\n while parents.get((current_state[0], current_state[1])) != None:\n path.insert(0, current_state[0])\n current_state = parents.get((current_state[0], current_state[1]))\n return path\n for neighbor in ComputeNeighbors(current_state[1]):\n if neighbor[1] not in discovered:\n frontier.append(neighbor)\n 
discovered.add(neighbor[1])\n parents.update({(neighbor[0], neighbor[1]): current_state})\n print(\"FAIL\")\n return None\n\n#arguments: 2d tuple of board\n#returns: 1d array of tile path to reach goal\ndef DFS(state):\n frontier = [(0, state)]\n discovered = set(state)\n parents = {(0, state): None}\n path = []\n while len(frontier) != 0:\n current_state = frontier.pop(0)\n discovered.add(current_state[1])\n if IsGoal(current_state[1]):\n while parents.get((current_state[0], current_state[1])) != None:\n path.insert(0, current_state[0])\n current_state = parents.get((current_state[0], current_state[1]))\n return path\n for neighbor in ComputeNeighbors(current_state[1]):\n if neighbor[1] not in discovered:\n frontier.insert(0, neighbor)\n discovered.add(neighbor[1])\n parents.update({(neighbor[0], neighbor[1]): current_state})\n print(\"FAIL\")\n return None\n\n#arguments: len of state, int\n#returns: 2d tuple of goal board\ndef findGoal(n):\n Total = n**2\n count = 1\n End_state = []\n for i in range(n):\n row = []\n for j in range(n):\n if count == Total:\n row.append(\"0\")\n else:\n row.append(str(count))\n count+=1\n End_state.append(tuple(row))\n \n return tuple(End_state)\n\n#arguments: 2d tuple of board\n#returns: 1d array of tile path to reach goal\ndef bidirectionalsearch(state):\n goal = findGoal(len(state))\n frontier = [(0, state)]\n frontier2 = [(0, goal)]\n discovered1 = set([state])\n discovered2 = set([goal])\n parents1 = {state: []}\n parents2 = {goal: []}\n path = []\n while len(frontier) != 0 and len(frontier2) != 0:\n currentState1 = frontier.pop(0)\n currentState2 = frontier2.pop(0)\n \n discovered1.add(currentState1[1])\n discovered2.add(currentState2[1])\n \n if len(discovered1.intersection(discovered2)) > 0:\n intersect = list(discovered1.intersection(discovered2))[0]\n forwardPath = parents1[intersect]\n backwardsPath = list(reversed(parents2[intersect]))\n return forwardPath + backwardsPath\n\n\n for neighbor in ComputeNeighbors(currentState1[1]):\n if neighbor[1] not in discovered1:\n frontier.append(neighbor)\n discovered1.add(neighbor[1])\n parents1.update({neighbor[1]: parents1[currentState1[1]] + [neighbor[0]]})\n\n for neighbor in ComputeNeighbors(currentState2[1]):\n if neighbor[1] not in discovered2:\n frontier2.append(neighbor)\n discovered2.add(neighbor[1])\n parents2.update({neighbor[1]: parents2[currentState2[1]] + [neighbor[0]]})\n\n return None\n\n\nmain()\n","repo_name":"ragrawal123/Python","sub_path":"NPuzzle/puzzle.py","file_name":"puzzle.py","file_ext":"py","file_size_in_byte":6314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"13933715817","text":"from os.path import join as join_path\nfrom subprocess import call\n\nfrom click import command\nfrom click import group\nfrom click import option\n\n\nAPP_NAME = \"/snake_eyes/snake_eyes\"\nBABEL_I18N_PATH = join_path(APP_NAME, \"translations\")\nMESSAGES_PATH = join_path(APP_NAME, \"translations\", \"messages.pot\")\n\n\n@group()\ndef cli():\n \"\"\"\n Perform i18n tasks\n \"\"\"\n pass\n\n\n@command()\ndef extract():\n \"\"\"\n Extracts all strings into pot file\n \"\"\"\n shell_command = (\n \"pybabel extract --mapping-file babel.cfg --keyword lazy_gettext\"\n f\"--output-file {MESSAGES_PATH} {APP_NAME}\"\n )\n return call(shell_command, shell=True)\n\n\n@command()\n@option(\"--language\", default=None, help=\"The output language\")\ndef init(language):\n \"\"\"\n Map translations to a different language.\n \"\"\"\n shell_command = 
(\n f\"pybabel init --input-file {MESSAGES_PATH}\"\n f\" --output-dir {BABEL_I18N_PATH} --locale {language}\"\n )\n return call(shell_command, shell=True)\n\n\n@command()\ndef translate():\n \"\"\"\n Creates new translations\n \"\"\"\n shell_command = f\"pybabel compile --directory {BABEL_I18N_PATH}\"\n return call(shell_command, shell=True)\n\n\n@command()\ndef update():\n \"\"\"\n Updates existing translations\n \"\"\"\n shell_command = (\n f\"pybabel update --input-file {MESSAGES_PATH} --output-dir {BABEL_I18N_PATH}\"\n )\n return call(shell_command, shell=True)\n\n\ncli.add_command(extract)\ncli.add_command(init)\ncli.add_command(translate)\ncli.add_command(update)\n","repo_name":"onlinejudge95/snake_eyes","sub_path":"cli/commands/command_babel.py","file_name":"command_babel.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"22463853856","text":"import time\n\n# memoization\ndef fib(n, computed = {0: 0, 1: 1}):\n if n not in computed:\n computed[n] = fib(n-1, computed) + fib(n-2, computed)\n\n return computed[n]\n\ndef my_fib(n):\n a, b = 0, 1\n for _ in range(n):\n a, b = b, a + b\n return a\n \n\nif __name__ == \"__main__\":\n time_one = time.time()\n print(my_fib(400))\n time_one_finish = '{:.5f}'.format(time.time() - time_one)\n print(f\"Memoized fib completed: {time_one_finish}\")\n ","repo_name":"louiskwt/try_try","sub_path":"python/problem/small_problems/fib/my_fibonacci.py","file_name":"my_fibonacci.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"28805058201","text":"\nimport numpy as np\n\ndef nextthing():\n var = np.random.rand(1)\n if var<0.3:\n print('Attack human!')\n elif var<0.7:\n print('Be cute.')\n else:\n print('Make weird noises.')","repo_name":"isphus1973/gitast","sub_path":"reaction.py","file_name":"reaction.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"69889051532","text":"from dataclasses import dataclass\n\n\n@dataclass\nclass Packet:\n src: str\n sport: int\n dst: str\n dport: int\n protocol: str\n transmission_protocol: str\n communication_protocol: str\n\n def __hash__(self):\n return hash(\n (\n self.src,\n self.sport,\n self.dst,\n self.dport,\n self.protocol,\n self.transmission_protocol,\n self.communication_protocol\n )\n )\n","repo_name":"avltree9798/hoshiko-nids","sub_path":"hoshi/packet.py","file_name":"packet.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"3551413518","text":"class Person:\n def __init__(self, name= \"n\", num = 0, region = \"n\"):\n self.name = name\n self.num = num\n self.region = region\n\nn = int(input())\n\narr = []\n\nfor _ in range(n):\n arr.append(Person(*tuple(input().split())))\n\narr.sort(key=lambda x: x.name)\nperson = arr[len(arr)-1]\n\n\nprint(f\"name {person.name}\")\nprint(f\"addr {person.num}\")\nprint(f\"city {person.region}\")\n","repo_name":"greenClothesZelda/codeTree","sub_path":"codetree/sort/Sort_2-6.py","file_name":"Sort_2-6.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"18437704230","text":"#! 
/usr/bin/env python3\nimport gspread\nimport slack_bolt\nimport datetime\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nscope = [\n 'https://www.googleapis.com/auth/drive',\n 'https://www.googleapis.com/auth/drive.file'\n]\n\nurl = 'https://docs.google.com/spreadsheets/d/1AHSiG8pJN5wZjg84mqOV3HsCXf8_3yNSRRp7Fk2utiM'\nkey = 'phillyhc-client-key.json'\ncreds = ServiceAccountCredentials.from_json_keyfile_name(key, scope)\nclient = gspread.authorize(creds)\nsheet = client.open_by_url(url)\n\nautogenerated = sheet.get_worksheet(1).get_all_records()\nslack_user_ids = sheet.get_worksheet(3).get_all_records()\n\nchannel_id = \"GULQKKBRB\"\n\napp = slack_bolt.App(\n token=\"\",\n signing_secret=\"\",\n)\n\n\ndef main():\n next_friday = next_weekday(datetime.date.today(), 4)\n pair = next_pair(next_friday, autogenerated)\n\n message = \"\"\n\n if pair[\"People\"] == \"No Cell\":\n message = f\"No cell this week! Event: {pair['Event']}\"\n\n else:\n person1, person2 = pair[\"People\"].split(\"/\")\n\n person1_id = [ p[\"Slack User ID\"] for p in slack_user_ids if p[\"Name\"] == person1 ][0]\n person2_id = [ p[\"Slack User ID\"] for p in slack_user_ids if p[\"Name\"] == person2 ][0]\n\n message = f\"<@{person1_id}> and <@{person2_id}> are on cell dinner this week!\"\n\n app.client.chat_postMessage(\n channel=channel_id,\n text=message,\n )\n\n\ndef next_weekday(d, weekday):\n days_ahead = weekday - d.weekday()\n if days_ahead <= 0: # Target day already happened this week\n days_ahead += 7\n return d + datetime.timedelta(days_ahead)\n\n\ndef next_pair(d, rotation):\n for pair in rotation:\n date = datetime.datetime.strptime(pair[\"Date\"], \"%Y-%m-%d\").date()\n if date == d:\n return pair\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"colsephiroth/jonscripts","sub_path":"dinner-reminder.py","file_name":"dinner-reminder.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"71991018606","text":"from get_data import get_data\n\ndata = get_data(2021, 4)\n\nnums, *boards = data.split(\"\\n\\n\")\nnums = list(map(int, nums.split(\",\")))\nboards = [[list(map(int, row.split())) for row in b.splitlines()] for b in boards]\n\n\ndef find_winners(nums, boards):\n drawn = set()\n complete = [False for _ in range(len(boards))]\n for num in nums:\n drawn.add(num)\n for i, board in enumerate(boards):\n horizontal = any(all(r in drawn for r in row) for row in board)\n vertical = any(all(r[i] in drawn for r in board) for i in range(len(board)))\n if horizontal or vertical:\n complete[i] = True\n yield num * sum(r for row in board for r in row if r not in drawn)\n if all(complete):\n return\n\n\nprint(next(find_winners(nums, boards)))\n\nfor winner in find_winners(nums, boards):\n pass\nprint(winner)\n","repo_name":"aglove2189/AdventOfCode","sub_path":"src/2021/day04.py","file_name":"day04.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"36572963308","text":"import collections\n\n\ndef get_int():\n return int(input())\n\n\ndef get_line():\n return input().strip()\n\n\ndef get_ints():\n return [\n int(i)\n for i in input().split()\n ]\n\n\ndef is_valid(a, b, needed, start):\n made_from = collections.defaultdict(int, {start: 1})\n needed_dict = collections.defaultdict(int, enumerate(needed))\n\n top = start\n\n # print(f\"Loop from {start}\")\n while top >= 0:\n # print(made_from, 
needed_dict)\n top_current = made_from.pop(top, 0)\n if top_current >= needed_dict[top]:\n top_current -= needed_dict[top]\n needed_dict[top] = 0\n made_from[top-a] += top_current\n made_from[top-b] += top_current\n else:\n return False\n top -= 1\n\n return sum(needed_dict.values()) == 0\n\n\ndef so_far(a, b):\n return b is None or a+1 < b\n\n\ndef middle(a, b):\n if b is None:\n return a*2\n else:\n return (a+b) // 2\n\n\ndef do_one_step():\n n, a, b = get_ints()\n needed = get_ints()\n\n left = 1\n right = None\n while so_far(left, right):\n mid = middle(left, right)\n if mid > 100_000:\n return \"IMPOSSIBLE\"\n if is_valid(a, b, needed, mid):\n right = mid\n else:\n left = mid\n\n return right+1\n\n\ndef main():\n n = get_int()\n for i in range(1, n+1):\n print(\"Case #%s: %s\" % (i, do_one_step()))\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"zhuny/Codejam","sub_path":"solution/J/JU/Subtransmutation/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"24179745945","text":"import os\nimport os.path as osp\nimport pathlib\n\n\nimport torch\nimport torch.nn.functional as F\nfrom rdkit import Chem, RDLogger\nfrom tqdm import tqdm\nimport numpy as np\nimport pandas as pd\nfrom torch_geometric.data import InMemoryDataset, download_url\nfrom hydra.utils import get_original_cwd\n\nfrom sparse_diffusion.utils import PlaceHolder\nfrom sparse_diffusion.datasets.abstract_dataset import (\n MolecularDataModule,\n AbstractDatasetInfos,\n)\nfrom sparse_diffusion.datasets.dataset_utils import (\n save_pickle,\n mol_to_torch_geometric,\n load_pickle,\n Statistics,\n)\nfrom sparse_diffusion.metrics.molecular_metrics import SparseMolecule\nfrom sparse_diffusion.metrics.metrics_utils import compute_all_statistics\n\n\natom_encoder = {\"C\": 0, \"N\": 1, \"S\": 2, \"O\": 3, \"F\": 4, \"Cl\": 5, \"Br\": 6}\natom_decoder = [\"C\", \"N\", \"S\", \"O\", \"F\", \"Cl\", \"Br\"]\n\n\nclass RemoveYTransform:\n def __call__(self, data):\n data.y = torch.zeros((1, 0), dtype=torch.float)\n return data\n\n\nclass MosesDataset(InMemoryDataset):\n train_url = \"https://media.githubusercontent.com/media/molecularsets/moses/master/data/train.csv\"\n val_url = \"https://media.githubusercontent.com/media/molecularsets/moses/master/data/test.csv\"\n test_url = \"https://media.githubusercontent.com/media/molecularsets/moses/master/data/test_scaffolds.csv\"\n\n def __init__(\n self,\n split,\n root,\n transform=None,\n pre_transform=None,\n pre_filter=None,\n ):\n self.split = split\n self.atom_encoder = atom_encoder\n if self.split == \"train\":\n self.file_idx = 0\n elif self.split == \"val\":\n self.file_idx = 1\n else:\n self.file_idx = 2\n\n super().__init__(root, transform, pre_transform, pre_filter)\n self.data, self.slices = torch.load(self.processed_paths[0])\n\n self.statistics = Statistics(\n num_nodes=load_pickle(self.processed_paths[1]),\n node_types=torch.from_numpy(np.load(self.processed_paths[2])).float(),\n bond_types=torch.from_numpy(np.load(self.processed_paths[3])).float(),\n charge_types=torch.from_numpy(np.load(self.processed_paths[4])).float(),\n valencies=load_pickle(self.processed_paths[5]),\n )\n self.smiles = load_pickle(self.processed_paths[6])\n\n @property\n def raw_file_names(self):\n return [\"train_moses.csv\", \"val_moses.csv\", \"test_moses.csv\"]\n\n @property\n def split_file_name(self):\n return [\"train_moses.csv\", \"val_moses.csv\", 
\"test_moses.csv\"]\n\n @property\n def processed_file_names(self):\n return [\n f\"{self.split}.pt\",\n f\"{self.split}_n.pickle\",\n f\"{self.split}_node_types.npy\",\n f\"{self.split}_bond_types.npy\",\n f\"{self.split}_charge.npy\",\n f\"{self.split}_valency.pickle\",\n f\"{self.split}_smiles.pickle\",\n ]\n\n def download(self):\n import rdkit # noqa\n\n train_path = download_url(self.train_url, self.raw_dir)\n os.rename(train_path, osp.join(self.raw_dir, \"train_moses.csv\"))\n\n test_path = download_url(self.test_url, self.raw_dir)\n os.rename(test_path, osp.join(self.raw_dir, \"val_moses.csv\"))\n\n valid_path = download_url(self.val_url, self.raw_dir)\n os.rename(valid_path, osp.join(self.raw_dir, \"test_moses.csv\"))\n\n def process(self):\n RDLogger.DisableLog(\"rdApp.*\")\n\n smile_list = pd.read_csv(self.raw_paths[self.file_idx])\n smile_list = smile_list[\"SMILES\"].values\n data_list = []\n smiles_kept = []\n charge_list = set()\n\n for i, smile in enumerate(tqdm(smile_list)):\n mol = Chem.MolFromSmiles(smile)\n\n if mol is not None:\n data = mol_to_torch_geometric(mol, atom_encoder, smile)\n unique_charge = set(torch.unique(data.charge).int().numpy())\n charge_list = charge_list.union(unique_charge)\n\n if self.pre_filter is not None and not self.pre_filter(data):\n continue\n if self.pre_transform is not None:\n data = self.pre_transform(data)\n data_list.append(data)\n smiles_kept.append(smile)\n\n statistics = compute_all_statistics(\n data_list, self.atom_encoder, charge_dic={0: 0}\n )\n save_pickle(statistics.num_nodes, self.processed_paths[1])\n np.save(self.processed_paths[2], statistics.node_types)\n np.save(self.processed_paths[3], statistics.bond_types)\n np.save(self.processed_paths[4], statistics.charge_types)\n save_pickle(statistics.valencies, self.processed_paths[5])\n print(\n \"Number of molecules that could not be mapped to smiles: \",\n len(smile_list) - len(smiles_kept),\n )\n save_pickle(set(smiles_kept), self.processed_paths[6])\n torch.save(self.collate(data_list), self.processed_paths[0])\n\n\nclass MosesDataModule(MolecularDataModule):\n def __init__(self, cfg):\n self.cfg = cfg\n self.datadir = cfg.dataset.datadir\n base_path = pathlib.Path(get_original_cwd()).parents[0]\n root_path = os.path.join(base_path, self.datadir)\n\n self.remove_h = False\n datasets = {\n \"train\": MosesDataset(\n split=\"train\", root=root_path, pre_transform=RemoveYTransform()\n ),\n \"val\": MosesDataset(\n split=\"val\", root=root_path, pre_transform=RemoveYTransform()\n ),\n \"test\": MosesDataset(\n split=\"test\", root=root_path, pre_transform=RemoveYTransform()\n ),\n }\n\n self.statistics = {\n \"train\": datasets[\"train\"].statistics,\n \"val\": datasets[\"val\"].statistics,\n \"test\": datasets[\"test\"].statistics,\n }\n super().__init__(cfg, datasets)\n\n\nclass MosesInfos(AbstractDatasetInfos):\n \"\"\"\n Moses will not support charge as it only contains one charge type 1\n \"\"\"\n\n def __init__(self, datamodule, cfg):\n # basic information\n self.name = \"moses\"\n self.is_molecular = True\n self.remove_h = False\n self.use_charge = False\n # statistics\n self.atom_encoder = atom_encoder\n self.atom_decoder = atom_decoder\n self.statistics = datamodule.statistics\n self.collapse_charge = torch.Tensor([-1, 0, 1]).int()\n self.train_smiles = datamodule.train_dataset.smiles\n self.val_smiles = datamodule.val_dataset.smiles\n self.test_smiles = datamodule.test_dataset.smiles\n super().complete_infos(datamodule.statistics, self.atom_encoder)\n\n # 
dimensions\n # atom_decoder = ['C', 'N', 'S', 'O', 'F', 'Cl', 'Br']\n self.output_dims = PlaceHolder(X=self.num_node_types, charge=0, E=5, y=0)\n\n # data specific settings\n # atom_decoder = ['C', 'N', 'S', 'O', 'F', 'Cl', 'Br']\n self.valencies = [4, 3, 2, 2, 1, 1, 1]\n self.atom_weights = [12, 14, 32, 16, 19, 35.4, 79.9]\n\n self.max_weight = 9 * 80 # Quite arbitrary\n","repo_name":"qym7/SparseDiff","sub_path":"sparse_diffusion/datasets/moses_dataset.py","file_name":"moses_dataset.py","file_ext":"py","file_size_in_byte":7181,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"33832221839","text":"from django import forms\nfrom django.forms import fields, models \nfrom . models import Review \n\nclass ReviewForm(forms.ModelForm):\n class Meta:\n model = Review\n exclude = (\"slug\",)\n labels = {\n \"name\":\"Your Name:\",\n \"text\":\"Your Comment:\",\n \"rating\":\"Your Rating:\",\n \"recommended\":\"Would You Recommended To Others ?\"\n } \n error_message = {\n \"name\":{\n \"required\":\"Your name must not be empty\",\n \"min_length\":\"please enter minimum 5 letters\"\n }\n\n } ","repo_name":"shamsuchoyimadathil/school-Management-app-django","sub_path":"reviews/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"43808939568","text":"def test1():\n \"\"\"Test the SocketTalk peers.\"\"\"\n from coils.SocketTalk import SocketTalk\n talk1, talk2 = SocketTalk.pair()\n talk1.put('one')\n assert talk2.get() == 'one'\n talk2.put('two')\n assert talk1.get() == 'two'\n talk2.close()\n assert talk1.get() is None\n talk1.close()\ntest1()\n\n\ndef test2():\n \"\"\"Test the SocketTalk client/server.\"\"\"\n from threading import Thread\n from coils.SocketTalk import SocketTalk\n\n addr = 'localhost', 50002\n\n def client():\n talk = SocketTalk.client(addr)\n talk.put('one')\n assert talk.get() == 'two'\n talk.close()\n\n def server():\n talk = SocketTalk.server(addr)\n assert talk.get() == 'one'\n talk.put('two')\n talk.close()\n\n Thread(target=client).start()\n Thread(target=server).start()\ntest2()\n","repo_name":"vmlaker/coils","sub_path":"coils/test/testSocketTalk.py","file_name":"testSocketTalk.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"2"} +{"seq_id":"269008358","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'kohou.wang'\n__time__ = '19-9-24'\n__email__ = 'oukohou@outlook.com'\n\n# If this runs wrong, don't ask me, I don't know why;\n# If this runs right, thank god, and I don't know why.\n# Maybe the answer, my friend, is blowing in the wind.\n# Well, I'm kidding... 
Always, Welcome to contact me.\n\n\"\"\"Description for the script:\ntrain SSR-Net.\n\"\"\"\n\nimport os\n\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = '1'\nimport time\nimport copy\nimport torch\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom sklearn.model_selection import train_test_split\nimport torch.nn as nn\nfrom datasets.read_imdb_data import IMDBDatasets\nfrom datasets.read_megaasian_data import MegaAgeAsianDatasets\nfrom datasets.read_face_age_data import FaceAgeDatasets\nfrom SSR_models.SSR_Net_model import SSRNet\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef train_model(model_, dataloaders_, criterion_, optimizer_, num_epochs_=25):\n global lr_scheduler\n \n since = time.time()\n val_acc_history = []\n \n best_model_wts = copy.deepcopy(model_.state_dict())\n best_acc = 0.0\n # tensorboard_writer.add_graph(model_, dataloaders_['train'])\n for epoch in range(num_epochs_):\n print('\\nEpoch {}/{}'.format(epoch, num_epochs_ - 1))\n print('-' * 10)\n \n # for phase in ['train', 'val']:\n for phase in sorted(dataloaders_.keys()):\n if phase == 'train':\n model_.train() # Set model to training mode\n print('in train mode...')\n else:\n print('in {} mode...'.format(phase))\n model_.eval() # Set model to evaluate mode\n \n running_loss = 0.0\n running_corrects_3 = 0\n running_corrects_5 = 0\n for i, (inputs, labels) in enumerate(dataloaders_[phase]):\n inputs = inputs.to(device)\n labels = labels.to(device).float()\n \n # zero the parameter gradients\n optimizer_.zero_grad()\n \n # track history if only in train\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model_(inputs)\n loss = criterion_(outputs, labels)\n \n if phase == 'train':\n loss.backward()\n optimizer_.step()\n \n # statistics\n running_loss += loss.item() * inputs.size(0)\n running_corrects_3 += torch.sum(torch.abs(outputs - labels) < 3) # CA 3\n running_corrects_5 += torch.sum(torch.abs(outputs - labels) < 5) # CA 5\n \n epoch_loss = running_loss / len(dataloaders_[phase].dataset)\n CA_3 = running_corrects_3.double() / len(dataloaders_[phase].dataset)\n CA_5 = running_corrects_5.double() / len(dataloaders_[phase].dataset)\n \n # print(\"inputs:{}\".format(inputs))\n # print(\"outputs:{}\".format(outputs))\n # print(\"labels:{}\".format(labels))\n \n print('{} Loss: {:.4f} CA_3: {:.4f}, CA_5: {:.4f}'.format(phase, epoch_loss, CA_3, CA_5))\n time_elapsed = time.time() - since\n print('Complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n \n # deep copy the model\n if phase == 'val' and CA_3 > best_acc:\n best_acc = CA_3\n best_model_wts = copy.deepcopy(model_.state_dict())\n if phase == 'val':\n val_acc_history.append(CA_3)\n \n lr_scheduler.step(epoch)\n \n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n print('Best val CA_3: {:4f}'.format(best_acc))\n \n # load best model weights\n model_.load_state_dict(best_model_wts)\n return model_, val_acc_history\n\n\nif __name__ == \"__main__\":\n train_data_base_path = '../age_estimation/datasets/megaage_asion/megaage_asian/megaage_asian/train'\n # batch_size = 1248\n batch_size = 50\n input_size = 64\n num_epochs = 90\n learning_rate = 0.0015 # originally 0.001\n weight_decay = 1e-4 # originally 1e-4\n augment = False\n load_pretrained = True\n \n \n model_to_train = SSRNet(image_size=input_size)\n if load_pretrained:\n loaded_model = torch.load(\n 
'../age_estimation/trained_models/SSR_Net_MegaAge_Asian/model_Adam_L1Loss_LRDecay_weightDecay0.0001_batch50_lr0.0015_epoch90+90_64x64.pth'\n )\n model_to_train.load_state_dict(loaded_model['state_dict'])\n \n # # for IMDB:\n # all_files = pd.read_csv(\"datasets/train.csv\")\n # all_files = all_files[:16000] # get a small part for fast convergence.\n # train_data_list, val_data_list = train_test_split(all_files, test_size=0.2, random_state=2019)\n #\n # # load dataset\n # train_gen = IMDBDatasets(train_data_list, train_data_base_path, mode=\"train\",\n # augment=augment,\n # )\n # train_loader = DataLoader(train_gen, batch_size=batch_size, shuffle=True, pin_memory=True,\n # num_workers=0)\n #\n # val_gen = IMDBDatasets(val_data_list, train_data_base_path,\n # augment=augment,\n # mode=\"train\",\n # )\n # val_loader = DataLoader(val_gen, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=0)\n \n import random\n \n # for MegaAgeAsian datasets:\n total_image_path = open(\n '../age_estimation/datasets/megaage_asion/megaage_asian/megaage_asian/list/train_name.txt').readlines()\n total_age_label = open(\n '../age_estimation/datasets/megaage_asion/megaage_asian/megaage_asian/list/train_age.txt').readlines()\n random.seed(2019)\n random.shuffle(total_image_path)\n random.seed(2019)\n random.shuffle(total_age_label)\n train_image_path = total_image_path[:int(len(total_image_path) * 0.9)]\n val_image_path = total_image_path[int(len(total_image_path) * 0.9):]\n \n train_age_label = total_age_label[:int(len(total_age_label) * 0.9)]\n val_age_label = total_age_label[int(len(total_age_label) * 0.9):]\n train_gen = MegaAgeAsianDatasets(train_image_path, train_age_label, train_data_base_path, mode=\"train\",\n augment=augment,\n )\n val_gen = MegaAgeAsianDatasets(val_image_path, val_age_label, train_data_base_path,\n augment=augment,\n mode=\"train\",\n )\n \n # # for face age Datasets\n # all_files = pd.read_csv(\"../age_estimation/datasets/face_age_train.csv\")\n # all_files = all_files.sample(frac=1.)\n # all_files = all_files[:4000] # get a small part for fast convergence.\n # train_data_list, val_data_list = train_test_split(all_files, test_size=0.2, random_state=2019)\n # train_gen = FaceAgeDatasets(train_data_list,)\n # val_gen = FaceAgeDatasets(val_data_list)\n \n train_loader = DataLoader(train_gen, batch_size=batch_size, shuffle=True, pin_memory=True,\n num_workers=0)\n val_loader = DataLoader(val_gen, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=0)\n \n test_image_path = open(\n '../age_estimation/datasets/megaage_asion/megaage_asian/megaage_asian/list/test_name.txt').readlines()\n test_age_label = open(\n '../age_estimation/datasets/megaage_asion/megaage_asian/megaage_asian/list/test_age.txt').readlines()\n test_data_base_path = '../age_estimation/datasets/megaage_asion/megaage_asian/megaage_asian/test'\n test_gen = MegaAgeAsianDatasets(test_image_path, test_age_label, test_data_base_path, mode=\"train\",\n augment=augment,\n )\n test_loader = DataLoader(test_gen, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=0)\n \n total_dataloader = {\n 'train': train_loader,\n 'val': val_loader,\n 'test': test_loader,\n }\n \n model_to_train = model_to_train.to(device)\n \n params_to_update = model_to_train.parameters()\n # Observe that all parameters are being optimized\n # optimizer_ft = optim.SGD(params_to_update, lr=learning_rate, momentum=0.9, weight_decay=weight_decay)\n optimizer_ft = optim.Adam(params_to_update, lr=learning_rate, 
weight_decay=weight_decay)\n # criterion = nn.MSELoss()\n criterion = nn.L1Loss()\n lr_scheduler = optim.lr_scheduler.StepLR(optimizer_ft, step_size=30, gamma=0.1)\n \n # Train and evaluate\n model_to_train, hist = train_model(model_to_train, total_dataloader, criterion, optimizer_ft,\n num_epochs_=num_epochs,\n )\n \n torch.save({\n 'epoch': num_epochs,\n 'state_dict': model_to_train.state_dict(),\n 'optimizer_state_dict': optimizer_ft.state_dict(),\n },\n '../age_estimation/trained_models/SSR_Net_MegaAge_Asian/model_Adam_L1Loss_LRDecay_weightDecay{}_batch{}_lr{}_epoch{}+90_64x64.pth'.format(\n weight_decay, batch_size, learning_rate, num_epochs))\n","repo_name":"oukohou/SSR_Net_Pytorch","sub_path":"train_SSR-Net.py","file_name":"train_SSR-Net.py","file_ext":"py","file_size_in_byte":9405,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"2"} +{"seq_id":"74164428847","text":"import eg\n\neg.RegisterPlugin(\n\tname = \"EGMQonttrol\",\n\tauthor = \"Guilherme Ramos\",\n\tversion = \"0.0.1\",\n\tkind = \"other\",\n\tcanMultiLoad = False,\n\turl = \"https://github.com/guibom/WebMQonttrol\",\n\tdescription = \"Plugin used to interface with WebMQonttrol webpage.\"\n)\n\nimport json\nfrom os import walk\nfrom os.path import splitext, join\nfrom threading import Event, Thread\n\nclass EGMQonttrol(eg.PluginBase):\n\n\tdef __init__(self):\n\t\tself.started = False;\n\t\tself.AddAction(SendFileList)\n\n\tdef __start__(self):\n\t\tself.started = True;\n\n\tdef __stop__(self):\t\t\n\t\tself.started = False;\n\nclass SendFileList(eg.ActionBase):\n\tname = \"Get File List\"\n\tdescription = \"Get JSON list of files from path\"\n\n\tdef __call__(self):\n\t\ttry:\n\t\t\tpath = eg.event.payload[2:] #Used to remove first part of the payload\n\n\t\t\tif (path):\t\t\t\n\t\t\t\tfor (_, dirList, fileList) in walk(path):\n\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t\tself.plugin.TriggerEvent(\"FileListCreated\", json.dumps({'root':path, 'files':fileList, 'folders':dirList}))\n\t\texcept:\n\t\t\tpass\n\n\t\t\n# broker = '192.168.0.175'\n# path = 'C:/Users/Longinus/Downloads'\n# MQTT = mosquitto.Mosquitto(\"EVMQonttrol\")\n# MQTT.connect(broker)\n# for (_, dirList, fileList) in walk(path):\t\n# \tbreak\n# MQTT.publish(\"home/htpc/listfiles/result\", json.dumps({'root':path, 'files':fileList, 'folders':dirList}))\n# OLD Code that sent the file extension too. Proved to be faster to quickly figure it out in javascript\n# filesObj = []\n# for currentFile in fileList:\n# \tfilesObj.append({'name':currentFile, 'type':splitext(join(path, currentFile))[1]})\n# MQTT.publish(\"home/htpc/listfiles/result\", json.dumps({'root':path, 'files':filesObj, 'folders':dirList}))","repo_name":"guibom/WebMQonttrol","sub_path":"Eventghost/plugins/EGMQonttrol/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"2"} +{"seq_id":"33094473115","text":"from datetime import date\nfrom django.shortcuts import redirect, render\nfrom .models import Book\nfrom django.http import HttpResponse\n# Create your views here.\n\nimport logging\nlogger = logging.getLogger('newlogger')\n\ndef func(request): # function based view\n return render(request, 'base.html')\n # print(request.method) #? to see the type of request\n # print('----------------In func-------------') #? 
testing purpose\n # return HttpResponse('Hi welcome to web page')\n # return JsonResponse({'key':'value'})\ndef testing_log(request):\n for i in range(20):\n logger.debug(f\"test no:- {i} started\")\n logging.debug(\"Message from debug\")\n logging.info(\"Message from info\")\n logging.warning(\"Message from warning\")\n logging.error(\"Message from error\")\n logging.critical(\"Message from critical\")\n logger.debug(f\"test no:- {i} ednded \")\n return HttpResponse(\"
<h1>Hi there</h1>
\")\n\ndef homepage(request): # we have to return something\n logger.info(f'{request.user}')\n logger.info(f'{request.build_absolute_uri}')\n if request.method == 'POST':\n data = request.POST\n if not data.get(\"id\"):\n if data['ispub'] == 'Yes':\n Book.objects.create(\n name=data['nm'],\n qty=data['qty'],\n price=data['price'],\n is_published=True,\n published_date=date.today()\n )\n elif data['ispub'] == 'No':\n Book.objects.create(\n name=data['nm'],\n qty=data['qty'],\n price=data['price'],\n # is_published=False #? defalut value is false\n #\n )\n return redirect('home')\n else:\n bid = data.get(\"id\")\n book_obj = Book.objects.get(id= bid)\n book_obj.name = data['nm']\n book_obj.qty = data['qty']\n book_obj.price = data['price']\n if data['ispub'] == 'Yes':\n if book_obj.is_published:\n pass\n else:\n book_obj.is_published = True\n book_obj.published_date = date.today()\n elif data['ispub'] == 'No':\n if book_obj.is_published == True:\n pass\n book_obj.save()\n return redirect('home')\n else:\n return render(request, template_name='home.html')\n\n\ndef get_books(request):\n \"\"\"To perfrom CRUDE Read operation\"\"\"\n books = Book.objects.all()\n return render(request, template_name='books.html', context={'all_books': books})\n\n\ndef delete_book(request, id):\n \"\"\"To perform CRUDE Delete operation\"\"\"\n # print(id, \"delete book id\") # debug\n Book.objects.get(id=id).delete()\n return redirect('show books')\n\ndef update_book(request ,id):\n \"\"\"To perform CRUD Update operation\"\"\"\n book_obj = Book.objects.get(id = id)\n return render(request, \"home.html\", context={\"single_book\":book_obj})\n \n\ndef soft_delete(request, id):\n book_obj = Book.objects.get(id = id)\n book_obj.is_deleted = 'Y'\n book_obj.save()\n return redirect('show books')\n\n\ndef active_books(request):\n # all_active_books = Book.objects.filter(is_deleted = 'N')\n all_active_books = Book.active_books.all()\n return render (request , template_name='books.html', context={'all_books': all_active_books})\n\n\ndef inactive_books(request):\n # all_inactive_books = Book.objects.filter(is_deleted = 'Y')\n all_inactive_books = Book.inactive_books.all()\n return render(request, template_name='books.html' , context={\"all_books\":all_inactive_books,\"book_status\":\"InActive\"})","repo_name":"shariquedev8051/second-practice-folder","sub_path":"Django/Library/Book/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"23057093280","text":"# -*- coding: utf-8 -*-\nimport json\nimport logging\nfrom functools import partial\n\nimport requests\n\n\nclass FlipkartAPI(object):\n \"\"\"\n Flipkart Marketplace Seller API Client.\n\n This client provides access to flipkart objects (orders, skus) in a generic\n way.\n\n You can read more about `Flipkart Marketplace API here\n `_\n\n :param access_token: Access token received at the end of an Authorization\n Code Flow or Client Credentials Flow.\n :param sandbox: True/False to connect to sandbox or not (Default: connects\n to production)\n :param debug: If enabled, spits out debug logs.\n\n Example::\n\n from flipkart import FlipkartAPI\n flipkart = FlipkartAPI(access_token='your_access_token')\n\n \"\"\"\n def __init__(self, access_token, sandbox=False, debug=False):\n self.access_token = access_token\n self.sandbox = sandbox\n self.debug = debug\n\n self.session = self.get_session()\n self.logger = self.get_logger()\n\n def 
get_session(self):\n \"\"\"\n Build a requests session that can be used to hold\n the authorization\n \"\"\"\n session = requests.Session()\n session.headers.update({\n 'Authorization': 'Bearer %s' % self.access_token,\n 'Content-type': 'application/json',\n })\n return session\n\n def get_logger(self):\n \"\"\"\n Return a logger\n \"\"\"\n logger = logging.getLogger('flipkart')\n logger.setLevel(logging.DEBUG if self.debug else logging.INFO)\n\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG if self.debug else logging.INFO)\n logger.addHandler(ch)\n\n return logger\n\n def build_url(self, path, params=None):\n \"\"\"\n Given a path construct the full URL for sandbox or production\n \"\"\"\n # TODO: Handle parameters\n if path.startswith('/'):\n path = path[1:]\n\n if self.sandbox:\n return 'https://sandbox-api.flipkart.net/sellers/' + path\n else:\n return 'https://api.flipkart.net/sellers/' + path\n\n def request(self, path, params=None, body=None, method=\"GET\"):\n \"\"\"\n Makes a request and sends the response body back.\n \"\"\"\n url = self.build_url(path, params)\n self.logger.debug(\"Request:URL: %s\", url)\n self.logger.debug(\"Request:Method: %s\", method)\n\n if body is not None:\n payload = json.dumps(body)\n else:\n payload = None\n\n self.logger.debug(\"Request:Payload: %s\", payload)\n\n if method == 'GET':\n response = self.session.get(url, data=payload, verify=False)\n elif method == 'POST':\n response = self.session.post(url, data=payload, verify=False)\n else:\n raise ValueError('Unknown method %s' % method)\n\n self.logger.debug(\"Response:code: %s\", response.status_code)\n self.logger.debug(\"Response:content: %s\", response.content)\n\n # Raise an error if the response is not 2XX\n response.raise_for_status()\n\n response_json = response.json()\n\n if response_json.get('status') == 'failure':\n raise FlipkartMultiError(response_json['errors'])\n\n return response_json\n\n def sku(self, sku_id):\n \"\"\"\n Get a SKU\n \"\"\"\n return SKU(sku_id, self)\n\n def listing(self, listing_id):\n \"\"\"\n Get a listing\n \"\"\"\n return Listing(listing_id, sku=None, client=self, lazy=False)\n\n def bulk_listing(self, listings):\n \"\"\"\n Create and update listing attributes such as stock, price, and\n procurement SLA for multiple SKUs. A maximum of 10 listings can be\n updated.\n \"\"\"\n raise Exception('Not implemented yet')\n\n def search_orders(self, filters=None, page_size=None, sort=None):\n \"\"\"\n Search through the orders\n \"\"\"\n return OrderItem.search(self, filters, page_size, sort)\n\n\nclass BaseFlipkartError(Exception):\n \"\"\"\n Base class for all flipkart exceptions\n \"\"\"\n pass\n\n\nclass FlipkartError(BaseFlipkartError):\n \"\"\"\n Base class for Flipkart exceptions\n \"\"\"\n def __init__(self, code, message):\n self.code = code\n self.message = message\n super(FlipkartError, self).__init__(code, message)\n\n\nclass FlipkartMultiError(BaseFlipkartError):\n \"\"\"\n An API request could result in multiple errors. 
This abstracts away the\n detail by showing multiple errors\n \"\"\"\n def __init__(self, errors):\n self.errors = []\n for error in errors:\n self.errors.append(\n FlipkartError(error['errorCode'], error['message'])\n )\n super(FlipkartMultiError, self).__init__(\n '%d errors in request' % len(self.errors)\n )\n\n\nclass FlipkartCollection(object):\n \"\"\"\n Common parent class for collections like orders\n \"\"\"\n pass\n\n\nclass FlipkartResource(object):\n \"\"\"\n Common parent class for flikart resources like SKUs, listing etc\n \"\"\"\n pass\n\n\nclass SKU(FlipkartResource):\n \"\"\"\n Represents a SKU ientified by a SKU ID.\n\n :param sku_id: ID of the SKU\n :param client: The client connection the SKU will use to fetch and update\n \"\"\"\n def __init__(self, sku_id, client):\n self.sku_id = sku_id\n self.client = client\n\n def create_listing(self, **attributes):\n \"\"\"\n Creates a listing for the SKU with the given attributes. The listing\n is not saved and must be explicitly saved by the user.\n\n Example::\n\n new_listing = sku.create_listing(\n mrp=100\n )\n new_listing.save()\n \"\"\"\n return Listing(\n listing_id=None, client=self.client,\n sku=self, attributes=attributes\n )\n\n @property\n def listings(self):\n \"\"\"\n Return a list of listings, but it seems like flipkart allows only\n one listing per seller. So this should usually return just one listing.\n However, to keep the API consistent, this will return a list\n \"\"\"\n response = self.client.request(\n 'skus/%s/listings' % self.sku_id,\n )\n return [\n Listing(\n response['listingId'],\n self, self.client,\n attributes=response['attributeValues']\n )\n ]\n\n\nclass OrderItem(FlipkartResource):\n \"\"\"\n Represents an order item with ID order_item_id.\n An order represented by OrderId could have items from multiple sellers\n and the seller only has access to order_item_id(s).\n \"\"\"\n\n def __init__(self, order_item_id, client, attributes=None):\n self.order_item_id = order_item_id\n self.attributes = attributes\n\n def refresh_attributes(self):\n \"\"\"\n Fetch the order attributes from flipkart\n \"\"\"\n response = self.client.request(\n 'orders/%s' % self.order_item_id\n )\n self.attributes = response\n\n def __getattr__(self, name):\n if self.attributes is not None and name in self.attributes:\n return self.attributes[name]\n raise AttributeError(name)\n\n @classmethod\n def search(cls, client, filters=None, page_size=None, sort=None):\n \"\"\"\n Search for orders that meet the criteria.\n\n :param sort: A tuple of field and sort order Ex: `('orderDate', 'asc')`\n \"\"\"\n body = {\n 'filter': filters or {},\n }\n\n if page_size is not None:\n body['pagination'] = {\n 'pageSize': page_size\n }\n\n if sort is not None:\n body['sort'] = {\n 'field': sort[0],\n 'order': sort[1],\n }\n\n response = client.request(\n 'orders/search',\n body=body,\n method=\"POST\"\n )\n return PaginationIterator(\n client, response,\n 'orderItems',\n lambda item: partial(cls, client=client)(\n item['orderItemId'], attributes=item\n )\n )\n\n\nclass PaginationIterator(object):\n \"\"\"\n An iterable that lets the user infinitely browse through pages of a\n paginated response\n\n :param client: The API client to make subsequent requests with\n :param response: The dictionary of response with pagination\n :param key: The key that identifies the item iterable (ex:orderItems)\n :param cast_func: Each item in iterable is casted with this function\n if specidied\n \"\"\"\n def __init__(self, client, response, key, 
cast_func=None):\n        self.client = client\n        self.items = []\n        self.key = key\n\n        if cast_func is None:\n            cast_func = lambda item: item\n        self.cast_func = cast_func\n\n        self._nextPageUrl = None\n\n        self.update_items_from(response)\n\n    def update_items_from(self, response):\n        \"\"\"\n        Given a response dictionary, update the items and urls\n        \"\"\"\n        self.items.extend(response[self.key])\n        self._nextPageUrl = response.get('nextPageUrl')\n\n    def __iter__(self):\n        self._current_index = -1\n        return self\n\n    @property\n    def count(self):\n        return len(self.items)\n\n    def __next__(self):\n        self._current_index += 1\n\n        if self._current_index >= self.count and self._nextPageUrl:\n            # If the iterator has reached its end and there is a\n            # nextPageUrl then get the fresh items and update\n            self.update_items_from(self.client.request(self._nextPageUrl))\n\n        if self._current_index < self.count:\n            return self.cast_func(self.items[self._current_index])\n        else:\n            raise StopIteration()\n\n    next = __next__\n\n\nclass Listing(FlipkartResource):\n    \"\"\"\n    Represents a Listing for a SKU\n    \"\"\"\n    def __init__(self, listing_id, sku, client, attributes=None, lazy=True):\n        self.listing_id = listing_id\n        self.sku = sku\n        self.client = client\n        self.attributes = attributes\n\n        if self.listing_id and not self.attributes and not lazy:\n            self.refresh_attributes()\n\n    @classmethod\n    def new(cls, client, sku, attributes):\n        \"\"\"\n        Create a new listing with the given attributes. This is not meant to\n        be called directly, but through `sku.create_listing(mrp=100)` style.\n        \"\"\"\n        return cls(listing_id=None, sku=sku, client=client, attributes=attributes)\n\n    def refresh_attributes(self):\n        \"\"\"\n        Fetch the attributes from flipkart\n        \"\"\"\n        if self.listing_id is None:\n            raise ValueError('Cannot fetch attributes for an unsaved listing')\n\n        response = self.client.request(\n            'skus/listings/%s' % self.listing_id\n        )\n\n        if self.sku is None:\n            # Set the sku if the SKU was not known before\n            self.sku = SKU(response['skuId'], self.client)\n\n        self.attributes = response['attributeValues']\n        return response\n\n    def update(self, attributes):\n        \"\"\"\n        Update listing attributes such as stock, price, and procurement SLA\n        for a particular ListingID. For a more convenient API use\n        `listing.save()` once the attributes have been changed.\n        \"\"\"\n        self.attributes = self.client.request(\n            'skus/listings/%s' % self.listing_id,\n            body={'attributeValues': attributes},\n            method=\"POST\"\n        )['response']['attributeValues']\n        return self.attributes\n\n    def save(self):\n        \"\"\"\n        Save any changes to the listing by updating it\n        \"\"\"\n        if self.listing_id:\n            return self.update(self.attributes)\n        else:\n            # This is a new listing. 
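`PaginationIterator` above fetches the next page lazily inside `__next__`. A small driver for it, assuming the class above is in scope; `StubClient` and its canned pages are hypothetical stand-ins for the real API client:

```python
# A stub client that serves one follow-up page, so the iterator's
# lazy page-fetching can be exercised without the real API.
class StubClient(object):
    def __init__(self):
        self.pages = {
            'page2': {'orderItems': [{'id': 3}], 'nextPageUrl': None},
        }

    def request(self, path, **kwargs):
        # Mimics client.request(nextPageUrl) used by the iterator.
        return self.pages[path]

first_page = {'orderItems': [{'id': 1}, {'id': 2}], 'nextPageUrl': 'page2'}
items = PaginationIterator(StubClient(), first_page, 'orderItems')
print([item['id'] for item in items])  # [1, 2, 3]
```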
So create a new listing\n response = self.client.request(\n \"skus/%s/listings\" % self.sku.sku_id,\n body={\n 'fsn': None, # XXX: Where is that coming from ??\n 'attributeValues': self.attributes,\n },\n method=\"POST\"\n )['response']\n self.listing_id = response['listingId']\n self.refresh_attributes()\n","repo_name":"manish2409/python-flipkart","sub_path":"flipkart/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":12300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"11009653815","text":"#!/usr/bin/env python\n#encoding=utf-8\n\n__cycle__ = 5\n\nfrom State import *\n\ns_1 = State(\"s1\")\ns_2 = State(\"s2\")\ne_event1 = s_1 > 0\ne_event2 = s_2 < 50\ne_main1 = e_event1.ands(e_event2.nots())\n\ne_main1.then().alert(\"halo\", \"wuchangwei\").doPlan(\"planid_1\")\n\n","repo_name":"logicouter/EventProcessEngine","sub_path":"aRule.py","file_name":"aRule.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"24139030200","text":"from __future__ import absolute_import, division, print_function\n\nimport tomopy.io.writer as writer\nfrom skimage import io as sio\nimport warnings\nimport numpy as np\nimport os\nimport h5py\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef _check_import(modname):\n try:\n return __import__(modname)\n except ImportError:\n logger.warn(modname + ' module not found')\n return None\n\nspefile = _check_import('spefile')\nnetCDF4 = _check_import('netCDF4')\nEdfFile = _check_import('EdfFile')\n\n__author__ = \"Doga Gursoy\"\n__credits__ = \"Francesco De Carlo\"\n__copyright__ = \"Copyright (c) 2015, UChicago Argonne, LLC.\"\n__docformat__ = 'restructuredtext en'\n__all__ = ['read_edf',\n 'read_hdf5',\n 'read_netcdf4',\n 'read_npy',\n 'read_spe',\n 'read_tiff',\n 'read_tiff_stack']\n\n\ndef _check_read(fname):\n known_extensions = ['.edf', '.tiff', '.tif', '.h5', '.hdf', '.npy']\n if not isinstance(fname, basestring):\n logger.error('file name must be a string')\n else:\n if writer.get_extension(fname) not in known_extensions:\n logger.error('unknown file extension')\n return os.path.abspath(fname)\n\n\ndef read_tiff(fname, slc=None):\n \"\"\"\n Read data from tiff file.\n\n Parameters\n ----------\n fname : str\n String defining the path or file name.\n slc : {sequence, int}\n Range of values for slicing data.\n ((start_1, end_1, step_1), ... , (start_N, end_N, step_N))\n defines slicing parameters for each axis of the data matrix.\n\n Returns\n -------\n ndarray\n Output 2D image.\n \"\"\"\n fname = _check_read(fname)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n arr = sio.imread(fname, plugin='tifffile', memmap=True)\n arr = _slice_array(arr, slc)\n return arr\n\n\ndef read_tiff_stack(fname, ind, digit, slc=None):\n \"\"\"\n Read data from stack of tiff files in a folder.\n\n Parameters\n ----------\n fname : str\n One of the file names in the tiff stack.\n ind : list of int\n Indices of the files to read.\n digit : int\n Number of digits in indexing stacked files.\n slc : {sequence, int}\n Range of values for slicing data.\n ((start_1, end_1, step_1), ... 
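The tomopy reader that begins in the record above guards optional dependencies with `_check_import`, returning `None` instead of raising at import time. The same pattern in isolation; the module names below are chosen just for the demo:

```python
import logging

logger = logging.getLogger(__name__)

def _check_import(modname):
    # Return the module if available, otherwise log and return None.
    try:
        return __import__(modname)
    except ImportError:
        logger.warning(modname + ' module not found')
        return None

json_mod = _check_import('json')        # stdlib module, always present
missing = _check_import('no_such_mod')  # logs a warning, returns None

if missing is None:
    print('optional feature disabled')
```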
, (start_N, end_N, step_N))\n defines slicing parameters for each axis of the data matrix.\n \"\"\"\n fname = _check_read(fname)\n list_fname = _list_file_stack(fname, ind, digit)\n\n for m, image in enumerate(list_fname):\n _arr = read_tiff(list_fname[m], slc)\n if m == 0:\n dx = len(ind)\n dy, dz = _arr.shape\n arr = np.zeros((dx, dy, dz))\n arr[m] = _arr\n return arr\n\n\ndef read_edf(fname, slc=None):\n \"\"\"\n Read data from a edf file.\n\n Parameters\n ----------\n fname : str\n String defining the path or file name.\n slc : {sequence, int}\n Range of values for slicing data.\n ((start_1, end_1, step_1), ... , (start_N, end_N, step_N))\n defines slicing parameters for each axis of the data matrix.\n\n Returns\n -------\n ndarray\n Data.\n \"\"\"\n try:\n fname = _check_read(fname)\n f = EdfFile.EdfFile(fname, access='r')\n d = f.GetStaticHeader(0)\n arr = np.empty((f.NumImages, int(d['Dim_2']), int(d['Dim_1'])))\n for (i, ar) in enumerate(arr):\n arr[i::] = f.GetData(i)\n arr = _slice_array(arr, slc)\n except KeyError:\n arr = None\n\n return arr\n\n\ndef read_hdf5(fname, group, slc=None):\n \"\"\"\n Read data from hdf5 file from a specific group.\n\n Parameters\n ----------\n fname : str\n String defining the path or file name.\n group : str\n Path to the group inside hdf5 file where data is located.\n slc : {sequence, int}\n Range of values for slicing data.\n ((start_1, end_1, step_1), ... , (start_N, end_N, step_N))\n defines slicing parameters for each axis of the data matrix.\n\n Returns\n -------\n ndarray\n Data.\n \"\"\"\n try:\n fname = _check_read(fname)\n f = h5py.File(fname, \"r\")\n arr = f[group]\n arr = _slice_array(arr, slc)\n f.close()\n except KeyError:\n arr = None\n\n return arr\n\n\ndef read_netcdf4(fname, group, slc=None):\n \"\"\"\n Read data from netcdf4 file from a specific group.\n\n Parameters\n ----------\n fname : str\n String defining the path or file name.\n group : str\n Variable name where data is stored.\n slc : {sequence, int}\n Range of values for slicing data.\n ((start_1, end_1, step_1), ... , (start_N, end_N, step_N))\n defines slicing parameters for each axis of the data matrix.\n\n Returns\n -------\n ndarray\n Data.\n \"\"\"\n fname = _check_read(fname)\n f = netCDF4.Dataset(fname, 'r')\n arr = f.variables[group]\n arr = _slice_array(arr, slc)\n f.close()\n return arr\n\n\ndef read_npy(fname, slc=None):\n \"\"\"\n Read binary data from a ``.npy`` file.\n\n Parameters\n ----------\n fname : str\n String defining the path or file name.\n slc : {sequence, int}\n Range of values for slicing data.\n ((start_1, end_1, step_1), ... , (start_N, end_N, step_N))\n defines slicing parameters for each axis of the data matrix.\n\n Returns\n -------\n ndarray\n Data.\n \"\"\"\n fname = _check_read(fname)\n arr = np.load(fname)\n arr = _slice_array(arr, slc)\n return arr\n\n\ndef read_spe(fname, slc=None):\n \"\"\"\n Read data from spe file.\n\n Parameters\n ----------\n fname : str\n String defining the path or file name.\n slc : {sequence, int}\n Range of values for slicing data.\n ((start_1, end_1, step_1), ... 
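`read_tiff_stack` relies on `_list_file_stack` (further below) to derive sibling file names from one member of the stack via a zero-padded format spec; that spec can be checked on its own. The `proj_` prefix and digit count here are made-up values:

```python
# How '{0:0={1}d}'.format pads an index to a fixed digit count,
# as used when expanding a tiff stack such as proj_0005.tiff.
body, ext, digit = 'proj_', '.tiff', 4

names = [body + '{0:0={1}d}'.format(m, digit) + ext for m in range(3)]
print(names)  # ['proj_0000.tiff', 'proj_0001.tiff', 'proj_0002.tiff']
```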
, (start_N, end_N, step_N))\n defines slicing parameters for each axis of the data matrix.\n\n Returns\n -------\n ndarray\n Data.\n \"\"\"\n fname = _check_read(fname)\n f = spefile.PrincetonSPEFile(fname)\n arr = f.getData()\n arr = _slice_array(arr, slc)\n return arr\n\n\ndef _slice_array(arr, slc):\n \"\"\"\n Perform slicing on ndarray.\n\n Parameters\n ----------\n arr : ndarray\n Input array to be sliced.\n slc : {sequence, int}\n Range of values for slicing data.\n ((start_1, end_1, step_1), ... , (start_N, end_N, step_N))\n defines slicing parameters for each axis of the data matrix.\n\n Returns\n -------\n ndarray\n Sliced array.\n \"\"\"\n if not isinstance(slc, (tuple)):\n slc = (slc, )\n if all(v is None for v in slc):\n return arr[:]\n axis_slice = ()\n for m, s in enumerate(slc):\n if s is None:\n s = (0, )\n if len(s) < 2:\n s += (arr.shape[m], )\n if len(s) < 3:\n s += (1, )\n axis_slice = axis_slice + (slice(s[0], s[1], s[2]), )\n return arr[axis_slice]\n\n\ndef _list_file_stack(fname, ind, digit):\n \"\"\"\n Return a stack of file names in a folder as a list.\n\n Parameters\n ----------\n fname : str\n String defining the path or file name.\n ind : list of int\n Indices of the files to read.\n digit : int\n Number of digits in indexing stacked files.\n \"\"\"\n fname = os.path.abspath(fname)\n body = writer.get_body(fname, digit)\n ext = writer.get_extension(fname)\n list_fname = []\n for m in ind:\n list_fname.append(str(body + '{0:0={1}d}'.format(m, digit) + ext))\n return list_fname\n","repo_name":"pontusfischer/tomopy","sub_path":"tomopy/io/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":7491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"2"} +{"seq_id":"31428440529","text":"import pygame\nfrom Assets import *\nfrom Util import *\nimport re\n\n#TODO: Make shared with Session.py\nTILE_EMPTY = 0\nTILE_SHIP = 1\nTILE_MISS = 2\nTILE_SHIP_HIT = 3\n\nclass Board:\n def init(self, windowSurface, boardWidth, parent):\n self.windowSurface = windowSurface\n self.boardWidth = boardWidth\n self.parent = parent\n self.board = [[0 for x in range(boardWidth)] for y in range(boardWidth)]\n\n #Can be called for the n'th letter for n'th row. 
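`_slice_array` above turns `((start, end, step), ...)` tuples into per-axis slice objects. A self-contained check of that equivalence on a small array:

```python
import numpy as np

arr = np.arange(24).reshape(2, 3, 4)

# One (start, end, step) triple per axis, as _slice_array expects.
slc = ((0, 2, 1), (1, 3, 1), (0, 4, 2))
axis_slice = tuple(slice(*s) for s in slc)

# Axis 0 keeps 2 items, axis 1 keeps 2, axis 2 keeps every other of 4.
assert arr[axis_slice].shape == (2, 2, 2)
print(arr[axis_slice].shape)
```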
Starts from 0\n        self._letters = []\n        for x in range(self.boardWidth):\n            self._letters.append(chr(65 + x))\n            for y in range(self.boardWidth):\n                self.board[x][y] = TILE_EMPTY\n\n        #Some debug values\n        self._gameAreaWidth = 350\n        self._tileWidth = self._gameAreaWidth / boardWidth\n\n        #TODO: Move fonts to Assets\n        self.tinyFont = pygame.font.SysFont(None, 18)\n        self.smallFont = pygame.font.SysFont(None, 24)\n        self.mediumFont = pygame.font.SysFont(None, 36)\n        self.largeFont = pygame.font.SysFont(None, 48)\n\n\n\n    def update(self, events):\n        startX = 270#(self.windowSurface.get_width() - self._gameAreaWidth) / 2\n        startY = (self.windowSurface.get_height() - self._gameAreaWidth) / 2\n\n        for x in range(self.boardWidth):\n            posX = startX + x * self._tileWidth\n\n            #Print letters\n            letterText = self.smallFont.render(self._letters[x], True, COLOR_BLACK)\n            letterTextRect = letterText.get_rect()\n            letterTextRect.centerx = posX + self._tileWidth/2\n            letterTextRect.bottom = startY - 4\n            self.windowSurface.blit(letterText, letterTextRect)\n\n            for y in range(self.boardWidth):\n                posY = startY + y * self._tileWidth\n                #Print numbers\n                if x == 0:\n                    letterText = self.smallFont.render(str(y + 1), True, COLOR_BLACK)\n                    letterTextRect = letterText.get_rect()\n                    letterTextRect.right = startX - 4\n                    letterTextRect.centery = posY + self._tileWidth / 2\n                    self.windowSurface.blit(letterText, letterTextRect)\n\n                #Drawing tiles\n                tile = self.board[x][y]\n                tileRect = (posX +1, posY+1, self._tileWidth-2, self._tileWidth-2)\n                borderRect = (posX , posY , self._tileWidth , self._tileWidth )\n                if tile == TILE_EMPTY:\n                    pygame.draw.rect(self.windowSurface, COLOR_BLACK, borderRect, 2)\n                    rect = pygame.draw.rect(self.windowSurface, COLOR_WHITE, tileRect, 0)\n                elif tile == TILE_SHIP:\n                    rect = pygame.draw.rect(self.windowSurface, COLOR_DARK_GREY, tileRect, 0)\n                    pygame.draw.rect(self.windowSurface, COLOR_DARK_GREY, borderRect, 2)\n                elif tile == TILE_SHIP_HIT:\n                    pygame.draw.rect(self.windowSurface, COLOR_DARK_GREY, borderRect, 2)\n                    rect = pygame.draw.rect(self.windowSurface, COLOR_DARK_GREY, tileRect, 0)\n                    pygame.draw.line(self.windowSurface, COLOR_RED, (posX, posY),\n                                     (posX + self._tileWidth, posY + self._tileWidth), 3)\n                    pygame.draw.line(self.windowSurface, COLOR_RED, (posX + self._tileWidth, posY),\n                                     (posX, posY + self._tileWidth), 3)\n                elif tile == TILE_MISS:\n                    pygame.draw.rect(self.windowSurface, COLOR_BLACK, borderRect, 2)\n                    rect = pygame.draw.rect(self.windowSurface, COLOR_WHITE, tileRect, 0)\n                    pygame.draw.circle(self.windowSurface, COLOR_DARK_GREY,\n                                       (posX + self._tileWidth/2, posY + self._tileWidth/2),\n                                       self._tileWidth / 6)\n\n                if clickedOnRect(rect, events):\n                    self.parent.onBoardClick(x, y)\n\n\n    def getTileByIndex(self, x, y):\n        return self.board[x][y]\n\n    def setTileByIndex(self, x, y, value):\n        #TODO: What if it's not legitimate?\n        if x >= 0 and y >= 0 and x < self.boardWidth and y < self.boardWidth:\n            self.board[x][y] = value\n\n    #Coordinates expected to be in form \"A4\", \"C12\" etc\n    def setTileByGameCoordinates(self, coordinates, value):\n        #fails when there are more than 1 letters (\"AA3\")\n        #the letter is the column index (x) and the number is the row (y), both 0-based\n        x = ord(coordinates[0]) - ord('A')\n        y = int(coordinates[1:]) - 1\n        self.setTileByIndex(x, y, value)\n        return\n","repo_name":"markotaht/Naval_Warfare_Simulator","sub_path":"UI/Board.py","file_name":"Board.py","file_ext":"py","file_size_in_byte":4556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"6572324139","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = 
'WF'\n\nfrom math import log\nimport numpy as np\nimport operator\n\n# step1: compute the entropy of the given dataset\ndef calShannonEnt(dataset):\n    # dataset is a list, and the last element of each inner list is the label\n    # e.g. [[1,1,'yes'],\n    #    [1,1,'yes'],\n    #    [1,0,'no'],\n    #    [0,0,'no'],\n    #    [0,1,'no']]\n\n    numClass = len(dataset) # length of the list, i.e. the total number of instances; note (a): for a matrix, use dataset.shape[0]\n    e_class = {} # dict holding the count of each distinct label in the dataset, e.g. the dataset above has 2 'yes' and 3 'no' (stored as key-value pairs)\n    # iterate over the samples and count how many fall in each class\n    for example in dataset:\n        if example[-1] not in e_class.keys(): # the current label is not yet a key in the dict\n            e_class[example[-1]] = 0 # initial value\n        e_class[example[-1]] += 1 # if it already exists, increment the count for that key\n    shannonEnt = 0.0\n    for k in e_class.keys():\n        prob = e_class[k]/numClass # probability of a single class\n        shannonEnt -= prob * log(prob, 2) # accumulate the entropy contribution of each class\n    return shannonEnt\n\n\n# step2: compute the information gain (to judge which attribute splits the data best)\n\n# # split the dataset on attribute i with the given value\ndef split_dataset(dataset, i, value):\n    ret_dataset = []\n    for example in dataset:\n        if example[i] == value: # extract the examples matching the feature value, e.g. for attribute wind={weak,strong}: collect the weak samples and the strong samples separately\n            ret_feature = example[:i] # elements at positions 0..(attribute-1)\n            ret_feature.extend(example[i+1:]) # attribute i itself is removed\n            ret_dataset.append(ret_feature)\n    return ret_dataset # returns attribute-{A}\n\ndef choseBestFeature(dataset): # choose the feature that splits the data best\n    feature_count = len(dataset[0]) - 1\n    baseEnt = calShannonEnt(dataset) # entropy of the original dataset\n    best_gain = 0.0\n    best_feature = -1\n    for i in range(feature_count):\n        # Python's set type is similar to a list, except that its elements cannot repeat\n        unique_feature = set([example[i] for example in dataset])\n        new_entropy = 0.0\n        for value in unique_feature:\n            sub_dataset = split_dataset(dataset, i, value) # subset where attribute i equals value\n            prob = len(sub_dataset)/len(dataset)\n            new_entropy += prob * calShannonEnt(sub_dataset) # weighted entropy of each subset\n        info_gain = baseEnt - new_entropy # information gain\n        if best_gain < info_gain:\n            best_gain = info_gain\n            best_feature = i\n    return best_feature # index of the attribute with the best discriminating power\n\n\n\ndef createTree(dataset, attribute):\n    class_lable = [example[-1] for example in dataset] # class labels: male or female\n    if class_lable.count(class_lable[0]) == len(class_lable):\n        return class_lable[0]\n    if len(dataset[0]) == 1:\n        return majority_count(class_lable)\n    best_feature_index = choseBestFeature(dataset) # pick the best feature\n    best_feature = attribute[best_feature_index]\n    my_tree = {best_feature: {}} # the classification result is stored as a nested dict\n    del(attribute[best_feature_index])\n    feature_value = [example[best_feature_index] for example in dataset]\n    unique_f_value = set(feature_value)\n    for value in unique_f_value:\n        sublabel = attribute[:]\n        my_tree[best_feature][value] = createTree(split_dataset(dataset, best_feature_index, value), sublabel)\n    return my_tree\n\n\n\ndef majority_count(classlist):\n    class_count = {}\n    for vote in classlist:\n        if vote not in class_count.keys():\n            class_count[vote] = 0\n        class_count[vote] += 1\n    sorted_class_count = sorted(class_count.items(), key=operator.itemgetter(1), reverse=True)\n    # print(sorted_class_count) # [('yes', 3), ('no', 2)]\n    return sorted_class_count[0][0]\n\n\ndef createDataSet1(): # create sample data\n    dataSet = [['长', '粗', '男'],\n               ['短', '粗', '男'],\n               ['短', '粗', '男'],\n               ['长', '细', '女'],\n               ['短', '细', '女'],\n               ['短', '粗', '女'],\n               ['长', '粗', '女'],\n               ['长', '粗', '女']]\n    labels = ['头发', '声音'] # the two features: hair, voice\n    return dataSet, labels\n\n\nif __name__ == \"__main__\":\n    dataset = [[1,1,'yes'],\n               [1,1,'yes'],\n               [1,0,'no'],\n               [0,0,'no'],\n               [0,1,'no']]\n    print(len(dataset))\n    print(calShannonEnt(dataset))\n\n    # dataset = np.array(dataset)\n    # print(dataset.shape[0])\n    # print(calShannonEnt(dataset))\n\n    # classlist = ['yes', 'yes', 'no', 'no', 'yes']\n    # print(majority_count(classlist))\n    dataSet, labels = 
createDataSet1() # create the sample data\n    print(createTree(dataSet, labels)) # print the learned decision-tree model\n\n","repo_name":"flywangfang258/ML_in_action","sub_path":"classify_2_7/3_TREE/ID3_tree.py","file_name":"ID3_tree.py","file_ext":"py","file_size_in_byte":4863,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"39976378298","text":"#\n# @lc app=leetcode id=1 lang=python3\n#\n# [1] Two Sum\n#\n\n# @lc code=start\nclass Solution:\n    def twoSum(self, nums: List[int], target: int) -> List[int]:\n        dic = {}\n        for i in range(len(nums)):\n            res = target - nums[i]\n            if res in dic:\n                return [dic[res], i]\n            dic[nums[i]] = i\n        return [0,0]\n# @lc code=end\n\n","repo_name":"mertdemirtas/Neetcode-150---Python","sub_path":"1.two-sum.py","file_name":"1.two-sum.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"863912620","text":"from __future__ import annotations\n\nfrom typing import Union, TYPE_CHECKING\n\nfrom datadog_api_client.model_utils import (\n    ModelNormal,\n    cached_property,\n    unset,\n    UnsetType,\n)\n\n\nif TYPE_CHECKING:\n    from datadog_api_client.v2.model.user_team_permission_attributes import UserTeamPermissionAttributes\n    from datadog_api_client.v2.model.user_team_permission_type import UserTeamPermissionType\n\n\nclass UserTeamPermission(ModelNormal):\n    @cached_property\n    def openapi_types(_):\n        from datadog_api_client.v2.model.user_team_permission_attributes import UserTeamPermissionAttributes\n        from datadog_api_client.v2.model.user_team_permission_type import UserTeamPermissionType\n\n        return {\n            \"attributes\": (UserTeamPermissionAttributes,),\n            \"id\": (str,),\n            \"type\": (UserTeamPermissionType,),\n        }\n\n    attribute_map = {\n        \"attributes\": \"attributes\",\n        \"id\": \"id\",\n        \"type\": \"type\",\n    }\n\n    def __init__(\n        self_,\n        id: str,\n        type: UserTeamPermissionType,\n        attributes: Union[UserTeamPermissionAttributes, UnsetType] = unset,\n        **kwargs,\n    ):\n        \"\"\"\n        A user's permissions for a given team\n\n        :param attributes: User team permission attributes\n        :type attributes: UserTeamPermissionAttributes, optional\n\n        :param id: The user team permission's identifier\n        :type id: str\n\n        :param type: User team permission type\n        :type type: UserTeamPermissionType\n        \"\"\"\n        if attributes is not unset:\n            kwargs[\"attributes\"] = attributes\n        super().__init__(kwargs)\n\n        self_.id = id\n        self_.type = type\n","repo_name":"DataDog/datadog-api-client-python","sub_path":"src/datadog_api_client/v2/model/user_team_permission.py","file_name":"user_team_permission.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"2"} +{"seq_id":"24437130614","text":"import threading\n\ndbfile = \"/var/www/db/misafe.db\"\nclients = []\nglobalrgb = {}\n\nopened = threading.Event()\nunlocked = threading.Event()\n\n\nclass colors:\n    blinkWrong = {\"r\": 255, \"g\": 0, \"b\": 0}\n    colorOpen = {\"r\": 0, \"g\": 255, \"b\": 0}\n    blinkCount = 5\n    blinkSpeed = 1 # in sec\n    blinkWhich = 1\n    colorTrigger = threading.Event()\n    colorEvent = None\n    colorEventArgs = None\n\nlockDuration = 3\nopenDuration = 9.0\nrampDuration = 0.5\nvmax = 50\nvclose = 50\n\nclass Object(object):\n    pass\n\nclass mapping:\n    door = Object()\n    lock = Object()\n    ledBody = Object()\n    ledEdge = Object()\n    ledInner = Object()\n    motion_detector = Object()\n    door.out1 = 10 #closed\n    door.out2 = 9 #open\n    door.in1 = 11 #closed\n    lock.out1 = 22 #open\n    
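For the toy dataset in the ID3 record's `__main__` (2 'yes', 3 'no'), `calShannonEnt` should print about 0.971 bits; a quick independent check of that value:

```python
from collections import Counter
from math import log2

labels = ['yes', 'yes', 'no', 'no', 'no']
n = len(labels)

# H = -sum(p * log2(p)) over the label frequencies
entropy = -sum((c / n) * log2(c / n) for c in Counter(labels).values())
print(round(entropy, 4))  # 0.971
```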
lock.out2 = 23 #closed\n    lock.in1 = 24 #open\n    lock.in2 = 25 #closed\n    ledBody.r = 17\n    ledBody.g = 18\n    ledBody.b = 27\n    ledEdge.r = 5\n    ledEdge.g = 6\n    ledEdge.b = 13\n    ledInner.w = 20\n    motion_detector.in1 = 21\n\nclass state:\n    stateName = [\n        'locked', #0\n        'rw_opening', #1\n        'rw_closing', #2\n        'rw_unlocked', #3\n        'safe_door_opening',#4\n        'safe_door_closing',#5\n        'unlocked', #6\n        'init', #7\n        'open', #8\n        'timeout' #9\n    ]\n    state = stateName.index('init')\n","repo_name":"julianullrich99/pi-safe","sub_path":"py/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"4381746759","text":"import os\nimport json\nimport torch\nimport torch.nn.functional as F\nimport logging\nimport numpy as np\n\nfrom torch.distributions import Categorical\nfrom transformers import BertConfig\n\nfrom data_preprocess_en import utils as dutils\n\n\nclass Params():\n    \"\"\"Class that loads hyperparameters from a json file.\n\n    Example:\n    ```\n    params = Params(json_path)\n    print(params.learning_rate)\n    params.learning_rate = 0.5  # change the value of learning_rate in params\n    ```\n    \"\"\"\n\n    def __init__(self, json_path):\n        with open(json_path) as f:\n            params = json.load(f)\n            self.__dict__.update(params)\n\n    def save(self, json_path):\n        with open(json_path, 'w') as f:\n            json.dump(self.__dict__, f, indent=4)\n\n    def update(self, json_path):\n        \"\"\"Loads parameters from json file\"\"\"\n        with open(json_path) as f:\n            params = json.load(f)\n            self.__dict__.update(params)\n\n    @property\n    def dict(self):\n        \"\"\"Gives dict-like access to Params instance by `params.dict['learning_rate']\"\"\"\n        return self.__dict__\n\n\nclass RunningAverage():\n    \"\"\"A simple class that maintains the running average of a quantity\n\n    Example:\n    ```\n    loss_avg = RunningAverage()\n    loss_avg.update(2)\n    loss_avg.update(4)\n    loss_avg() = 3\n    ```\n    \"\"\"\n\n    def __init__(self):\n        self.steps = 0\n        self.total = 0\n\n    def update(self, val):\n        self.total += val\n        self.steps += 1\n\n    def __call__(self):\n        return self.total / float(self.steps)\n\n\ndef set_logger(log_path):\n    \"\"\"Set the logger to log info in terminal and file `log_path`.\n\n    In general, it is useful to have a logger so that every output to the terminal is saved\n    in a permanent file. 
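The pi-safe `common.py` above coordinates threads through module-level `threading.Event` objects (`opened`, `unlocked`). The wait/set handshake it relies on, in miniature; the thread body here is a hypothetical stand-in for the real door logic:

```python
import threading
import time

unlocked = threading.Event()

def door_task():
    # Block until another thread signals that the lock is open.
    unlocked.wait()
    print('driving door motor')

t = threading.Thread(target=door_task)
t.start()

time.sleep(0.1)   # stand-in for the real unlock sequence
unlocked.set()    # wake every thread waiting on the event
t.join()
```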
Here we save it to `model_dir/train.log`.\n\n Example:\n ```\n logging.info(\"Starting training...\")\n ```\n\n Args:\n log_path: (string) where to log\n \"\"\"\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n if not logger.handlers:\n # Logging to a file\n file_handler = logging.FileHandler(log_path)\n file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))\n logger.addHandler(file_handler)\n\n # Logging to console\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter('%(message)s'))\n logger.addHandler(stream_handler)\n\n\ndef load_checkpoint(model, ckpt_dir, device, optimizer=None, scheduler=None):\n model.load_state_dict(torch.load(os.path.join(ckpt_dir, 'model.bin'), map_location=device))\n osd_path = os.path.join(ckpt_dir, 'optim.bin')\n if optimizer is not None and os.path.isfile(osd_path):\n optimizer.load_state_dict(torch.load(osd_path))\n ssd_path = os.path.join(ckpt_dir, 'sched.bin')\n if scheduler is not None and os.path.isfile(ssd_path):\n scheduler.load_state_dict(torch.load(ssd_path))\n best_val_bleu = 0.\n for x in os.listdir(ckpt_dir):\n if x.startswith('pred_dev'):\n best_val_bleu = float(x.split('_')[-1].split('.txt')[0])\n return model, optimizer, scheduler, best_val_bleu\n\n\ndef save_checkpoint(model, ckpt_dir, optimizer, scheduler):\n os.mkdir(ckpt_dir)\n torch.save(model.state_dict(), os.path.join(ckpt_dir, 'model.bin'))\n torch.save(optimizer.state_dict(), os.path.join(ckpt_dir, 'optim.bin'))\n torch.save(scheduler.state_dict(), os.path.join(ckpt_dir, 'sched.bin'))\n\n\ndef convert_tokens_to_string(tokens):\n \"\"\" Converts a sequence of tokens (string) in a single string. \"\"\"\n return ' '.join(tokens).replace(' ##', '').strip()\n\n\ndef filter_spans(starts, ends, max_i, stop_i=0):\n for i, start in enumerate(starts):\n end = ends[i]\n if start == stop_i or start > end or start >= max_i:\n starts[i] = ends[i] = -1\n continue\n starts = [s for s in starts if s > -1]\n ends = [e for e in ends if e > -1]\n assert(len(starts) == len(ends))\n return starts, ends\n\n\ndef get_sp_strs(start_lst, end_lst, context_len):\n max_i = context_len - 1\n starts, ends = filter_spans(start_lst, end_lst, max_i)\n if not starts:\n starts.append(0)\n ends.append(0)\n starts, ends = dutils.ilst2str(starts), dutils.ilst2str(ends)\n return starts, ends\n\n\ndef load_rules(rule_path, mask='_', fmask='{}'):\n with open(rule_path, encoding='utf8') as f:\n rules = [''] + [l.strip().replace(mask, fmask) for l in f]\n rule_slot_cnts = [sum(int(y == fmask) for y in x.split()) for x in rules]\n return rules, rule_slot_cnts\n\n\ndef get_config(params, bert_class, bleu_rl):\n config = BertConfig.from_pretrained(bert_class)\n config.bert_class = bert_class\n config.device = params.device\n config.rl_model = 'bleu' if bleu_rl else None\n config.rl_ratio = params.rl_ratio\n\n config.num_labels = len(params.tag2idx)\n config.tags = params.idx2tag\n config.pad_tag_id = params.pad_tag_id\n\n config.rules = params.rules\n config.rule_slot_cnts = params.rule_slot_cnts\n config.max_sp_len = params.max_sp_len\n config.additional_special_tokens = tuple(f'[SLOT{x}]' for x in range(params.max_sp_len))\n config.vocab_size += len(config.additional_special_tokens)\n return config\n\n\ndef safe_log(inp, eps=1e-45):\n return (inp + eps).log()\n\n\ndef sample_helper(logits, dim=2):\n samples = Categorical(logits=logits).sample()\n samples_prob = torch.gather(logits, dim, samples.unsqueeze(dim))\n return samples, 
samples_prob\n\n\ndef cls_loss(dist, refs, masks):\n    refs = F.one_hot(refs, dist.shape[-1])\n    loss = torch.sum(safe_log(dist) * refs.float(), dim=-1)\n    num_tokens = torch.sum(masks).item()\n    return -torch.sum(loss * masks) / num_tokens\n","repo_name":"lisjin/hct","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5736,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"2"} +{"seq_id":"11172506414","text":"from tkinter import *\nimport tkinter as tk\nimport pyautogui\nimport numpy as np\nfrom pyscreenshot import grab\nimport pytesseract\nfrom googletrans import Translator\nimport PIL\nfrom PIL import Image\nfrom PIL import ImageTk\nfrom time import sleep\n\ndef bwfy(img, th=200, lonly=False):\n    l = img.convert('L')\n    if lonly:\n        return l\n    bw = np.asarray(l).copy()\n    bw[bw < th] = 0 # Black\n    bw[bw >= th] = 255 # White\n    # Now we put it back in Pillow/PIL land\n    return Image.fromarray(bw)\n\ndef ss():\n    global ss_id\n    im = bwfy(grab(bbox=(x1, y1, x2, y2)), th=200) # bbox takes absolute (left, top, right, bottom) screen coordinates\n    text = pytesseract.image_to_string(im, lang=L_img[tkvar.get()], config='--psm 7')\n    print(text)\n    stext = fanyi.translate(text, src=L_trans[tkvar.get()], dest=L_trans[tkvarTo.get()]).text\n    print(stext)\n    lbl_text.set(' '.join(stext.split()))\n    entry.update_idletasks()\n    ss_id = root.after(period, ss)\n\ndef ul_pos(event):\n    global x1, y1\n    x1 = root.winfo_pointerx()\n    y1 = root.winfo_pointery()\n    print('x1', 'y1', x1, y1)\n\n\ndef br_pos(event):\n    global x2, y2\n    x2 = root.winfo_pointerx()\n    y2 = root.winfo_pointery()\n    print('x2', 'y2', x2, y2)\n\ndef jieping():\n    global app, ss_id, btn_text\n    btn_txt = btn_text.get()\n    print('ss id', ss_id)\n    if btn_txt == 'Select Screen':\n        app.maximize()\n        btn_text.set('Stop')\n    else:\n        root.after_cancel(ss_id)\n        btn_text.set('Select Screen')\n    print('ss id', ss_id)\n\nclass DrawRectangle(Frame):\n    def __init__(self,master):\n        Frame.__init__(self,master=None)\n        self.x = self.y = 0\n        self.canvas = Canvas(self, cursor=\"cross\", width=1, height=1)\n        self.maximized = False\n\n        self.sbarv=Scrollbar(self,orient=VERTICAL)\n        self.sbarh=Scrollbar(self,orient=HORIZONTAL)\n        self.sbarv.config(command=self.canvas.yview)\n        self.sbarh.config(command=self.canvas.xview)\n\n        self.canvas.config(yscrollcommand=self.sbarv.set)\n        self.canvas.config(xscrollcommand=self.sbarh.set)\n\n        self.canvas.grid(row=0,column=0,sticky=N+S+E+W)\n        self.sbarv.grid(row=0,column=1,stick=N+S)\n        self.sbarh.grid(row=1,column=0,sticky=E+W)\n\n        self.canvas.bind(\"\", self.recover_size)\n        self.canvas.bind(\"<ButtonPress-1>\", self.on_button_press)\n        self.canvas.bind(\"<B1-Motion>\", self.on_move_press)\n        self.canvas.bind(\"<ButtonRelease-1>\", self.on_button_release)\n\n        self.rect = None\n\n        self.start_x = None\n        self.start_y = None\n\n    def maximize(self):\n        root.wm_attributes('-alpha', 0.3)\n        self.canvas.config(width=root.winfo_screenwidth()-3, height=root.winfo_screenheight()-3)\n        self.maximized = True\n\n    def recover_size(self, event=None):\n        root.wm_attributes('-alpha', 1)\n        self.canvas.config(width=1, height=1)\n        self.maximized = False\n\n    def on_button_press(self, event):\n        global x1, y1\n        x1 = root.winfo_pointerx()\n        y1 = root.winfo_pointery()\n        print('x1', 'y1', x1, y1)\n        # save mouse drag start position\n        self.start_x = self.canvas.canvasx(event.x)\n        self.start_y = self.canvas.canvasy(event.y)\n\n        # create rectangle if not yet exist\n        if not self.rect:\n            self.rect = self.canvas.create_rectangle(self.x, self.y, 1, 1, outline='green')\n\n    def on_move_press(self, event):\n        curX = 
self.canvas.canvasx(event.x)\n curY = self.canvas.canvasy(event.y)\n\n w, h = self.canvas.winfo_width(), self.canvas.winfo_height()\n if event.x > 0.9*w:\n self.canvas.xview_scroll(1, 'units')\n elif event.x < 0.1*w:\n self.canvas.xview_scroll(-1, 'units')\n if event.y > 0.9*h:\n self.canvas.yview_scroll(1, 'units')\n elif event.y < 0.1*h:\n self.canvas.yview_scroll(-1, 'units')\n\n # expand rectangle as you drag the mouse\n self.canvas.coords(self.rect, self.start_x, self.start_y, curX, curY)\n\n def on_button_release(self, event):\n global x2, y2\n x2 = root.winfo_pointerx()\n y2 = root.winfo_pointery()\n print('x2', 'y2', x2, y2)\n global ss_id\n if x2 - x1 > 100 and y2 - y1 > 10:\n self.recover_size()\n ss_id = root.after(period, ss)\n\nx1, y1, x2, y2 = 0,0,0,0\nss_id = None\nperiod = 100\nfanyi = Translator()\nL_img = {'Chinese':'chi_sim','English':'eng','Japanese':'jpn','Korean':'kor'}\nL_trans = {'Chinese':'zh-CN','English':'en','Japanese':'ja','Korean':'ko'}\n\nroot = tk.Tk()# root.bind('
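The screen-translator record ends mid-file, but its capture loop is already visible: `ss()` re-arms itself with `root.after(period, ss)`. The general periodic-callback pattern, stripped of the OCR and translation parts:

```python
import tkinter as tk

root = tk.Tk()
period = 100  # milliseconds between ticks

def tick(count=[0]):
    count[0] += 1
    print('tick', count[0])
    if count[0] < 5:
        # after() schedules a single shot, so the callback must
        # reschedule itself to keep polling.
        root.after(period, tick)
    else:
        root.destroy()

root.after(period, tick)
root.mainloop()
```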